1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -DCHECK -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
4 // RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK2
5 // RUN: %clang_cc1 -DCHECK -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
7 // RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK4
8 
9 // RUN: %clang_cc1 -DCHECK -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
10 // RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
11 // RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
12 // RUN: %clang_cc1 -DCHECK -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
14 // RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
15 
16 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK9
17 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
18 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++  -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK10
19 
20 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
21 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
22 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++  -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
23 
24 // expected-no-diagnostics
25 #ifndef HEADER
26 #define HEADER
27 
struct St { // Helper aggregate: used as the defaulted extra argument of S's copy ctor below.
  int a, b;
  St() : a(0), b(0) {} // Default ctor zero-initializes both fields.
  St(const St &st) : a(st.a + st.b), b(0) {} // Copy folds a+b into a; b reset to 0.
  ~St() {} // Non-trivial dtor: forces cleanup codegen for the default argument temporary.
};
34 
volatile int g = 1212; // Global read by S's ctors; privatized in the LAMBDA variant's pragma (line 74).
volatile int &g1 = g;  // Reference alias of g; also on the private() list to exercise reference privatization.
37 
template <class T>
struct S { // Templated type with non-trivial ctors/dtor to exercise private() construction/destruction codegen.
  T f;
  S(T a) : f(a + g) {} // Reads volatile global g, so initialization cannot be constant-folded.
  S() : f(g) {} // Default ctor also reads g.
  S(const S &s, St t = St()) : f(s.f + t.a) {} // Copy ctor with a defaulted St argument (temporary + dtor).
  operator T() { return T(); } // Conversion to T; returns a value-initialized T.
  ~S() {} // Non-trivial dtor: private copies must be destroyed at region exit.
};
47 
48 
template <typename T>
T tmain() { // Template driver: privatizes a scalar, an array, an array of S and a reference.
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> &var = test; // Reference on the private() list; privatized as a fresh S<T> object.
#pragma omp target teams distribute private(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var; // Operates on the (uninitialized) private copies; only codegen shape is checked.
    s_arr[i] = var;
  }
  return T();
}
63 
S<float> test; // Globals below are the variables privatized by the non-template (main) variant at line 91.
int t_var = 333;
int vec[] = {1, 2};
S<float> s_arr[] = {1, 2}; // Dynamic initialization via S(T): emits __cxx_global_var_init + array dtor.
S<float> var(3);
69 
int main() { // Two build variants: LAMBDA privatizes globals inside a lambda; otherwise globals + static local.
  static int sivar; // Static local appearing on the private() lists of both variants.
#ifdef LAMBDA
  [&]() {
#pragma omp target teams distribute private(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {

    // Skip global, bound tid and loop vars
    g = 1; // Writes target the private copies, not the globals.
    g1 = 1;
    sivar = 2;
    [&]() { // Inner lambda re-captures the privatized variables.
      g = 2;
      g1 = 2;
      sivar = 4;

    }();
  }
  }();
  return 0;
#else
#pragma omp target teams distribute private(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var; // Invokes S<float> copy assignment on the private copies.
    sivar += i;
  }
  return tmain<int>(); // Instantiates the template variant (offload region anchored at line 56).
#endif
}
100 
101 
102 
103 // Skip global, bound tid and loop vars
104 
105 // private(s_arr)
106 
107 // private(var)
108 
109 
110 
111 
112 
113 // Skip global, bound tid and loop vars
114 
115 // private(s_arr)
116 
117 
118 // private(var)
119 
120 
121 #endif
122 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init
123 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
124 // CHECK1-NEXT:  entry:
125 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) @test)
126 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
127 // CHECK1-NEXT:    ret void
128 //
129 //
130 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
131 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
132 // CHECK1-NEXT:  entry:
133 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
134 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
135 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
136 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
137 // CHECK1-NEXT:    ret void
138 //
139 //
140 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
141 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
142 // CHECK1-NEXT:  entry:
143 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
144 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
145 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
146 // CHECK1-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
147 // CHECK1-NEXT:    ret void
148 //
149 //
150 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
151 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
152 // CHECK1-NEXT:  entry:
153 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
154 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
155 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
156 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
157 // CHECK1-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
158 // CHECK1-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
159 // CHECK1-NEXT:    store float [[CONV]], float* [[F]], align 4
160 // CHECK1-NEXT:    ret void
161 //
162 //
163 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
164 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
165 // CHECK1-NEXT:  entry:
166 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
167 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
168 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
169 // CHECK1-NEXT:    ret void
170 //
171 //
172 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
173 // CHECK1-SAME: () #[[ATTR0]] {
174 // CHECK1-NEXT:  entry:
175 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00)
176 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00)
177 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
178 // CHECK1-NEXT:    ret void
179 //
180 //
181 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
182 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
183 // CHECK1-NEXT:  entry:
184 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
185 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
186 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
187 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
188 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
189 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
190 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
191 // CHECK1-NEXT:    ret void
192 //
193 //
194 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
195 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] {
196 // CHECK1-NEXT:  entry:
197 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
198 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
199 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
200 // CHECK1:       arraydestroy.body:
201 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
202 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
203 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
204 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
205 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
206 // CHECK1:       arraydestroy.done1:
207 // CHECK1-NEXT:    ret void
208 //
209 //
210 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
211 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
212 // CHECK1-NEXT:  entry:
213 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
214 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
215 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
216 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
217 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
218 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
219 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
220 // CHECK1-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
221 // CHECK1-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
222 // CHECK1-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
223 // CHECK1-NEXT:    store float [[ADD]], float* [[F]], align 4
224 // CHECK1-NEXT:    ret void
225 //
226 //
227 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
228 // CHECK1-SAME: () #[[ATTR0]] {
229 // CHECK1-NEXT:  entry:
230 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @var, float 3.000000e+00)
231 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
232 // CHECK1-NEXT:    ret void
233 //
234 //
235 // CHECK1-LABEL: define {{[^@]+}}@main
236 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
237 // CHECK1-NEXT:  entry:
238 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
239 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
240 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
241 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
242 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
243 // CHECK1-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
244 // CHECK1-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
245 // CHECK1:       omp_offload.failed:
246 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91() #[[ATTR2]]
247 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
248 // CHECK1:       omp_offload.cont:
249 // CHECK1-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
250 // CHECK1-NEXT:    ret i32 [[CALL]]
251 //
252 //
253 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91
254 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
255 // CHECK1-NEXT:  entry:
256 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
257 // CHECK1-NEXT:    ret void
258 //
259 //
260 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
261 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
262 // CHECK1-NEXT:  entry:
263 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
264 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
265 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
266 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
267 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
268 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
269 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
270 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
271 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
272 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
273 // CHECK1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
274 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
275 // CHECK1-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
276 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
277 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
278 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
279 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
280 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
281 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
282 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
283 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
284 // CHECK1-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
285 // CHECK1-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
286 // CHECK1:       arrayctor.loop:
287 // CHECK1-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
288 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
289 // CHECK1-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
290 // CHECK1-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
291 // CHECK1-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
292 // CHECK1:       arrayctor.cont:
293 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
294 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
295 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
296 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
297 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
298 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
299 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
300 // CHECK1:       cond.true:
301 // CHECK1-NEXT:    br label [[COND_END:%.*]]
302 // CHECK1:       cond.false:
303 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
304 // CHECK1-NEXT:    br label [[COND_END]]
305 // CHECK1:       cond.end:
306 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
307 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
308 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
309 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
310 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
311 // CHECK1:       omp.inner.for.cond:
312 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
313 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
314 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
315 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
316 // CHECK1:       omp.inner.for.cond.cleanup:
317 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
318 // CHECK1:       omp.inner.for.body:
319 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
320 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
321 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
322 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
323 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
324 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
325 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
326 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
327 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
328 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I]], align 4
329 // CHECK1-NEXT:    [[IDXPROM2:%.*]] = sext i32 [[TMP10]] to i64
330 // CHECK1-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM2]]
331 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast %struct.S* [[ARRAYIDX3]] to i8*
332 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[VAR]] to i8*
333 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP11]], i8* align 4 [[TMP12]], i64 4, i1 false)
334 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4
335 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[SIVAR]], align 4
336 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
337 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[SIVAR]], align 4
338 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
339 // CHECK1:       omp.body.continue:
340 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
341 // CHECK1:       omp.inner.for.inc:
342 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
343 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP15]], 1
344 // CHECK1-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
345 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
346 // CHECK1:       omp.inner.for.end:
347 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
348 // CHECK1:       omp.loop.exit:
349 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
350 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
351 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
352 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
353 // CHECK1-NEXT:    [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
354 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN6]], i64 2
355 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
356 // CHECK1:       arraydestroy.body:
357 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
358 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
359 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
360 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
361 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
362 // CHECK1:       arraydestroy.done7:
363 // CHECK1-NEXT:    ret void
364 //
365 //
366 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
367 // CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat {
368 // CHECK1-NEXT:  entry:
369 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
370 // CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
371 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
372 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
373 // CHECK1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
374 // CHECK1-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 8
375 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
376 // CHECK1-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
377 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
378 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
379 // CHECK1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
380 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
381 // CHECK1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
382 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 signext 1)
383 // CHECK1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
384 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 signext 2)
385 // CHECK1-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
386 // CHECK1-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 8
387 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
388 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
389 // CHECK1-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
390 // CHECK1-NEXT:    br i1 [[TMP2]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
391 // CHECK1:       omp_offload.failed:
392 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56() #[[ATTR2]]
393 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
394 // CHECK1:       omp_offload.cont:
395 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
396 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
397 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
398 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
399 // CHECK1:       arraydestroy.body:
400 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP3]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
401 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
402 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
403 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
404 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
405 // CHECK1:       arraydestroy.done2:
406 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
407 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
408 // CHECK1-NEXT:    ret i32 [[TMP4]]
409 //
410 //
411 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
412 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
413 // CHECK1-NEXT:  entry:
414 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
415 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
416 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
417 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
418 // CHECK1-NEXT:    ret void
419 //
420 //
421 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
422 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
423 // CHECK1-NEXT:  entry:
424 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
425 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
426 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
427 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
428 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
429 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
430 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 signext [[TMP0]])
431 // CHECK1-NEXT:    ret void
432 //
433 //
434 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56
435 // CHECK1-SAME: () #[[ATTR4]] {
436 // CHECK1-NEXT:  entry:
437 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
438 // CHECK1-NEXT:    ret void
439 //
440 //
441 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
442 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
443 // CHECK1-NEXT:  entry:
444 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
445 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
446 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
447 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
448 // CHECK1-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
449 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
450 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
451 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
452 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
453 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
454 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
455 // CHECK1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
456 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
457 // CHECK1-NEXT:    [[_TMP2:%.*]] = alloca %struct.S.0*, align 8
458 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
459 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
460 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
461 // CHECK1-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 8
462 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
463 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
464 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
465 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
466 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
467 // CHECK1-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
468 // CHECK1-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
469 // CHECK1:       arrayctor.loop:
470 // CHECK1-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
471 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
472 // CHECK1-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
473 // CHECK1-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
474 // CHECK1-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
475 // CHECK1:       arrayctor.cont:
476 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
477 // CHECK1-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP2]], align 8
478 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
479 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
480 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
481 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
482 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
483 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
484 // CHECK1:       cond.true:
485 // CHECK1-NEXT:    br label [[COND_END:%.*]]
486 // CHECK1:       cond.false:
487 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
488 // CHECK1-NEXT:    br label [[COND_END]]
489 // CHECK1:       cond.end:
490 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
491 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
492 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
493 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
494 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
495 // CHECK1:       omp.inner.for.cond:
496 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
497 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
498 // CHECK1-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
499 // CHECK1-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
500 // CHECK1:       omp.inner.for.cond.cleanup:
501 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
502 // CHECK1:       omp.inner.for.body:
503 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
504 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
505 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
506 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
507 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
508 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
509 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
510 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
511 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
512 // CHECK1-NEXT:    [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP2]], align 8
513 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
514 // CHECK1-NEXT:    [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64
515 // CHECK1-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
516 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast %struct.S.0* [[ARRAYIDX5]] to i8*
517 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast %struct.S.0* [[TMP10]] to i8*
518 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false)
519 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
520 // CHECK1:       omp.body.continue:
521 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
522 // CHECK1:       omp.inner.for.inc:
523 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
524 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1
525 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
526 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
527 // CHECK1:       omp.inner.for.end:
528 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
529 // CHECK1:       omp.loop.exit:
530 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
531 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
532 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
533 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
534 // CHECK1-NEXT:    [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
535 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN7]], i64 2
536 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
537 // CHECK1:       arraydestroy.body:
538 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
539 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
540 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
541 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
542 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
543 // CHECK1:       arraydestroy.done8:
544 // CHECK1-NEXT:    ret void
545 //
546 //
547 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
548 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
549 // CHECK1-NEXT:  entry:
550 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
551 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
552 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
553 // CHECK1-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
554 // CHECK1-NEXT:    ret void
555 //
556 //
557 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
558 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
559 // CHECK1-NEXT:  entry:
560 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
561 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
562 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
563 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
564 // CHECK1-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
565 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
566 // CHECK1-NEXT:    ret void
567 //
568 //
569 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
570 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
571 // CHECK1-NEXT:  entry:
572 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
573 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
574 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
575 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
576 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
577 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
578 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
579 // CHECK1-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
580 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
581 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
582 // CHECK1-NEXT:    ret void
583 //
584 //
585 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
586 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
587 // CHECK1-NEXT:  entry:
588 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
589 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
590 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
591 // CHECK1-NEXT:    ret void
592 //
593 //
594 // CHECK1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_teams_distribute_private_codegen.cpp
595 // CHECK1-SAME: () #[[ATTR0]] {
596 // CHECK1-NEXT:  entry:
597 // CHECK1-NEXT:    call void @__cxx_global_var_init()
598 // CHECK1-NEXT:    call void @__cxx_global_var_init.1()
599 // CHECK1-NEXT:    call void @__cxx_global_var_init.2()
600 // CHECK1-NEXT:    ret void
601 //
602 //
603 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
604 // CHECK1-SAME: () #[[ATTR0]] {
605 // CHECK1-NEXT:  entry:
606 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
607 // CHECK1-NEXT:    ret void
608 //
609 //
610 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init
611 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
612 // CHECK2-NEXT:  entry:
613 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) @test)
614 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
615 // CHECK2-NEXT:    ret void
616 //
617 //
618 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
619 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
620 // CHECK2-NEXT:  entry:
621 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
622 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
623 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
624 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
625 // CHECK2-NEXT:    ret void
626 //
627 //
628 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
629 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
630 // CHECK2-NEXT:  entry:
631 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
632 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
633 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
634 // CHECK2-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
635 // CHECK2-NEXT:    ret void
636 //
637 //
638 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
639 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
640 // CHECK2-NEXT:  entry:
641 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
642 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
643 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
644 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
645 // CHECK2-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
646 // CHECK2-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
647 // CHECK2-NEXT:    store float [[CONV]], float* [[F]], align 4
648 // CHECK2-NEXT:    ret void
649 //
650 //
651 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
652 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
653 // CHECK2-NEXT:  entry:
654 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
655 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
656 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
657 // CHECK2-NEXT:    ret void
658 //
659 //
660 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
661 // CHECK2-SAME: () #[[ATTR0]] {
662 // CHECK2-NEXT:  entry:
663 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00)
664 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00)
665 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
666 // CHECK2-NEXT:    ret void
667 //
668 //
669 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
670 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
671 // CHECK2-NEXT:  entry:
672 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
673 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
674 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
675 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
676 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
677 // CHECK2-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
678 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
679 // CHECK2-NEXT:    ret void
680 //
681 //
682 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
683 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] {
684 // CHECK2-NEXT:  entry:
685 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
686 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
687 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
688 // CHECK2:       arraydestroy.body:
689 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
690 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
691 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
692 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
693 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
694 // CHECK2:       arraydestroy.done1:
695 // CHECK2-NEXT:    ret void
696 //
697 //
698 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
699 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
700 // CHECK2-NEXT:  entry:
701 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
702 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
703 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
704 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
705 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
706 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
707 // CHECK2-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
708 // CHECK2-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
709 // CHECK2-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
710 // CHECK2-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
711 // CHECK2-NEXT:    store float [[ADD]], float* [[F]], align 4
712 // CHECK2-NEXT:    ret void
713 //
714 //
715 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
716 // CHECK2-SAME: () #[[ATTR0]] {
717 // CHECK2-NEXT:  entry:
718 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @var, float 3.000000e+00)
719 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
720 // CHECK2-NEXT:    ret void
721 //
722 //
723 // CHECK2-LABEL: define {{[^@]+}}@main
724 // CHECK2-SAME: () #[[ATTR3:[0-9]+]] {
725 // CHECK2-NEXT:  entry:
726 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
727 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
728 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
729 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
730 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
731 // CHECK2-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
732 // CHECK2-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
733 // CHECK2:       omp_offload.failed:
734 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91() #[[ATTR2]]
735 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
736 // CHECK2:       omp_offload.cont:
737 // CHECK2-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
738 // CHECK2-NEXT:    ret i32 [[CALL]]
739 //
740 //
741 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91
742 // CHECK2-SAME: () #[[ATTR4:[0-9]+]] {
743 // CHECK2-NEXT:  entry:
744 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
745 // CHECK2-NEXT:    ret void
746 //
747 //
748 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
749 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
750 // CHECK2-NEXT:  entry:
751 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
752 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
753 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
754 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
755 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
756 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
757 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
758 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
759 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
760 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
761 // CHECK2-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
762 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
763 // CHECK2-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
764 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
765 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
766 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
767 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
768 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
769 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
770 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
771 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
772 // CHECK2-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
773 // CHECK2-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
774 // CHECK2:       arrayctor.loop:
775 // CHECK2-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
776 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
777 // CHECK2-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
778 // CHECK2-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
779 // CHECK2-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
780 // CHECK2:       arrayctor.cont:
781 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
782 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
783 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
784 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
785 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
786 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
787 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
788 // CHECK2:       cond.true:
789 // CHECK2-NEXT:    br label [[COND_END:%.*]]
790 // CHECK2:       cond.false:
791 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
792 // CHECK2-NEXT:    br label [[COND_END]]
793 // CHECK2:       cond.end:
794 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
795 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
796 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
797 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
798 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
799 // CHECK2:       omp.inner.for.cond:
800 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
801 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
802 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
803 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
804 // CHECK2:       omp.inner.for.cond.cleanup:
805 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
806 // CHECK2:       omp.inner.for.body:
807 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
808 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
809 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
810 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
811 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
812 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
813 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
814 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
815 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
816 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I]], align 4
817 // CHECK2-NEXT:    [[IDXPROM2:%.*]] = sext i32 [[TMP10]] to i64
818 // CHECK2-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM2]]
819 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast %struct.S* [[ARRAYIDX3]] to i8*
820 // CHECK2-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[VAR]] to i8*
821 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP11]], i8* align 4 [[TMP12]], i64 4, i1 false)
822 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4
823 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[SIVAR]], align 4
824 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
825 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[SIVAR]], align 4
826 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
827 // CHECK2:       omp.body.continue:
828 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
829 // CHECK2:       omp.inner.for.inc:
830 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
831 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP15]], 1
832 // CHECK2-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
833 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
834 // CHECK2:       omp.inner.for.end:
835 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
836 // CHECK2:       omp.loop.exit:
837 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
838 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
839 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
840 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
841 // CHECK2-NEXT:    [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
842 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN6]], i64 2
843 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
844 // CHECK2:       arraydestroy.body:
845 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
846 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
847 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
848 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
849 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
850 // CHECK2:       arraydestroy.done7:
851 // CHECK2-NEXT:    ret void
852 //
853 //
854 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
855 // CHECK2-SAME: () #[[ATTR6:[0-9]+]] comdat {
856 // CHECK2-NEXT:  entry:
857 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
858 // CHECK2-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
859 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
860 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
861 // CHECK2-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
862 // CHECK2-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 8
863 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
864 // CHECK2-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
865 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
866 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR]], align 4
867 // CHECK2-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
868 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
869 // CHECK2-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
870 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 signext 1)
871 // CHECK2-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
872 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 signext 2)
873 // CHECK2-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
874 // CHECK2-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 8
875 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
876 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
877 // CHECK2-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
878 // CHECK2-NEXT:    br i1 [[TMP2]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
879 // CHECK2:       omp_offload.failed:
880 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56() #[[ATTR2]]
881 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
882 // CHECK2:       omp_offload.cont:
883 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
884 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
885 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
886 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
887 // CHECK2:       arraydestroy.body:
888 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP3]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
889 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
890 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
891 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
892 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
893 // CHECK2:       arraydestroy.done2:
894 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
895 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
896 // CHECK2-NEXT:    ret i32 [[TMP4]]
897 //
898 //
899 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
900 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
901 // CHECK2-NEXT:  entry:
902 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
903 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
904 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
905 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
906 // CHECK2-NEXT:    ret void
907 //
908 //
909 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
910 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
911 // CHECK2-NEXT:  entry:
912 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
913 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
914 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
915 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
916 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
917 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
918 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 signext [[TMP0]])
919 // CHECK2-NEXT:    ret void
920 //
921 //
922 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56
923 // CHECK2-SAME: () #[[ATTR4]] {
924 // CHECK2-NEXT:  entry:
925 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
926 // CHECK2-NEXT:    ret void
927 //
928 //
929 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
930 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
931 // CHECK2-NEXT:  entry:
932 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
933 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
934 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
935 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
936 // CHECK2-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
937 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
938 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
939 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
940 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
941 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
942 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
943 // CHECK2-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
944 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
945 // CHECK2-NEXT:    [[_TMP2:%.*]] = alloca %struct.S.0*, align 8
946 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
947 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
948 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
949 // CHECK2-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 8
950 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
951 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
952 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
953 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
954 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
955 // CHECK2-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
956 // CHECK2-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
957 // CHECK2:       arrayctor.loop:
958 // CHECK2-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
959 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
960 // CHECK2-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
961 // CHECK2-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
962 // CHECK2-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
963 // CHECK2:       arrayctor.cont:
964 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
965 // CHECK2-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP2]], align 8
966 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
967 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
968 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
969 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
970 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
971 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
972 // CHECK2:       cond.true:
973 // CHECK2-NEXT:    br label [[COND_END:%.*]]
974 // CHECK2:       cond.false:
975 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
976 // CHECK2-NEXT:    br label [[COND_END]]
977 // CHECK2:       cond.end:
978 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
979 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
980 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
981 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
982 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
983 // CHECK2:       omp.inner.for.cond:
984 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
985 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
986 // CHECK2-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
987 // CHECK2-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
988 // CHECK2:       omp.inner.for.cond.cleanup:
989 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
990 // CHECK2:       omp.inner.for.body:
991 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
992 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
993 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
994 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
995 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
996 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
997 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
998 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
999 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
1000 // CHECK2-NEXT:    [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP2]], align 8
1001 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1002 // CHECK2-NEXT:    [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64
1003 // CHECK2-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
1004 // CHECK2-NEXT:    [[TMP12:%.*]] = bitcast %struct.S.0* [[ARRAYIDX5]] to i8*
1005 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast %struct.S.0* [[TMP10]] to i8*
1006 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false)
1007 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1008 // CHECK2:       omp.body.continue:
1009 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1010 // CHECK2:       omp.inner.for.inc:
1011 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1012 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1
1013 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
1014 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
1015 // CHECK2:       omp.inner.for.end:
1016 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1017 // CHECK2:       omp.loop.exit:
1018 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1019 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
1020 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
1021 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
1022 // CHECK2-NEXT:    [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1023 // CHECK2-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN7]], i64 2
1024 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1025 // CHECK2:       arraydestroy.body:
1026 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1027 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1028 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1029 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
1030 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
1031 // CHECK2:       arraydestroy.done8:
1032 // CHECK2-NEXT:    ret void
1033 //
1034 //
1035 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1036 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1037 // CHECK2-NEXT:  entry:
1038 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1039 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1040 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1041 // CHECK2-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
1042 // CHECK2-NEXT:    ret void
1043 //
1044 //
1045 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1046 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1047 // CHECK2-NEXT:  entry:
1048 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1049 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1050 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1051 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1052 // CHECK2-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
1053 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
1054 // CHECK2-NEXT:    ret void
1055 //
1056 //
1057 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1058 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1059 // CHECK2-NEXT:  entry:
1060 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1061 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1062 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1063 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1064 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1065 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1066 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1067 // CHECK2-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
1068 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
1069 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
1070 // CHECK2-NEXT:    ret void
1071 //
1072 //
1073 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1074 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1075 // CHECK2-NEXT:  entry:
1076 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1077 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1078 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1079 // CHECK2-NEXT:    ret void
1080 //
1081 //
1082 // CHECK2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_teams_distribute_private_codegen.cpp
1083 // CHECK2-SAME: () #[[ATTR0]] {
1084 // CHECK2-NEXT:  entry:
1085 // CHECK2-NEXT:    call void @__cxx_global_var_init()
1086 // CHECK2-NEXT:    call void @__cxx_global_var_init.1()
1087 // CHECK2-NEXT:    call void @__cxx_global_var_init.2()
1088 // CHECK2-NEXT:    ret void
1089 //
1090 //
1091 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1092 // CHECK2-SAME: () #[[ATTR0]] {
1093 // CHECK2-NEXT:  entry:
1094 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
1095 // CHECK2-NEXT:    ret void
1096 //
1097 //
1098 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init
1099 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
1100 // CHECK3-NEXT:  entry:
1101 // CHECK3-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) @test)
1102 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
1103 // CHECK3-NEXT:    ret void
1104 //
1105 //
1106 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
1107 // CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1108 // CHECK3-NEXT:  entry:
1109 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1110 // CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1111 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1112 // CHECK3-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
1113 // CHECK3-NEXT:    ret void
1114 //
1115 //
1116 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
1117 // CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1118 // CHECK3-NEXT:  entry:
1119 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1120 // CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1121 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1122 // CHECK3-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
1123 // CHECK3-NEXT:    ret void
1124 //
1125 //
1126 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1127 // CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1128 // CHECK3-NEXT:  entry:
1129 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1130 // CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1131 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1132 // CHECK3-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1133 // CHECK3-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
1134 // CHECK3-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
1135 // CHECK3-NEXT:    store float [[CONV]], float* [[F]], align 4
1136 // CHECK3-NEXT:    ret void
1137 //
1138 //
1139 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1140 // CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1141 // CHECK3-NEXT:  entry:
1142 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1143 // CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1144 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1145 // CHECK3-NEXT:    ret void
1146 //
1147 //
1148 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
1149 // CHECK3-SAME: () #[[ATTR0]] {
1150 // CHECK3-NEXT:  entry:
1151 // CHECK3-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), float 1.000000e+00)
1152 // CHECK3-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 1), float 2.000000e+00)
1153 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
1154 // CHECK3-NEXT:    ret void
1155 //
1156 //
1157 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1158 // CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1159 // CHECK3-NEXT:  entry:
1160 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1161 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1162 // CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1163 // CHECK3-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1164 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1165 // CHECK3-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1166 // CHECK3-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
1167 // CHECK3-NEXT:    ret void
1168 //
1169 //
1170 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
1171 // CHECK3-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] {
1172 // CHECK3-NEXT:  entry:
1173 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
1174 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
1175 // CHECK3-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1176 // CHECK3:       arraydestroy.body:
1177 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i32 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1178 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1179 // CHECK3-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1180 // CHECK3-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
1181 // CHECK3-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1182 // CHECK3:       arraydestroy.done1:
1183 // CHECK3-NEXT:    ret void
1184 //
1185 //
1186 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1187 // CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1188 // CHECK3-NEXT:  entry:
1189 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1190 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1191 // CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1192 // CHECK3-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1193 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1194 // CHECK3-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1195 // CHECK3-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1196 // CHECK3-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
1197 // CHECK3-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
1198 // CHECK3-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
1199 // CHECK3-NEXT:    store float [[ADD]], float* [[F]], align 4
1200 // CHECK3-NEXT:    ret void
1201 //
1202 //
1203 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
1204 // CHECK3-SAME: () #[[ATTR0]] {
1205 // CHECK3-NEXT:  entry:
1206 // CHECK3-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @var, float 3.000000e+00)
1207 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
1208 // CHECK3-NEXT:    ret void
1209 //
1210 //
1211 // CHECK3-LABEL: define {{[^@]+}}@main
1212 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
1213 // CHECK3-NEXT:  entry:
1214 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1215 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1216 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1217 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
1218 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
1219 // CHECK3-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
1220 // CHECK3-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1221 // CHECK3:       omp_offload.failed:
1222 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91() #[[ATTR2]]
1223 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1224 // CHECK3:       omp_offload.cont:
1225 // CHECK3-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1226 // CHECK3-NEXT:    ret i32 [[CALL]]
1227 //
1228 //
1229 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91
1230 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
1231 // CHECK3-NEXT:  entry:
1232 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1233 // CHECK3-NEXT:    ret void
1234 //
1235 //
1236 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
1237 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
1238 // CHECK3-NEXT:  entry:
1239 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1240 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1241 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1242 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1243 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1244 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1245 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1246 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1247 // CHECK3-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1248 // CHECK3-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1249 // CHECK3-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
1250 // CHECK3-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1251 // CHECK3-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
1252 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
1253 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1254 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1255 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1256 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1257 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1258 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1259 // CHECK3-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1260 // CHECK3-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
1261 // CHECK3-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1262 // CHECK3:       arrayctor.loop:
1263 // CHECK3-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1264 // CHECK3-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
1265 // CHECK3-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
1266 // CHECK3-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1267 // CHECK3-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1268 // CHECK3:       arrayctor.cont:
1269 // CHECK3-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
1270 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1271 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1272 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1273 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1274 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
1275 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1276 // CHECK3:       cond.true:
1277 // CHECK3-NEXT:    br label [[COND_END:%.*]]
1278 // CHECK3:       cond.false:
1279 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1280 // CHECK3-NEXT:    br label [[COND_END]]
1281 // CHECK3:       cond.end:
1282 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1283 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1284 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1285 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1286 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1287 // CHECK3:       omp.inner.for.cond:
1288 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1289 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1290 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1291 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
1292 // CHECK3:       omp.inner.for.cond.cleanup:
1293 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
1294 // CHECK3:       omp.inner.for.body:
1295 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1296 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1297 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1298 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1299 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
1300 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
1301 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP9]]
1302 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
1303 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I]], align 4
1304 // CHECK3-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 [[TMP10]]
1305 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8*
1306 // CHECK3-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[VAR]] to i8*
1307 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP11]], i8* align 4 [[TMP12]], i32 4, i1 false)
1308 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4
1309 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[SIVAR]], align 4
1310 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
1311 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[SIVAR]], align 4
1312 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1313 // CHECK3:       omp.body.continue:
1314 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1315 // CHECK3:       omp.inner.for.inc:
1316 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1317 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
1318 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
1319 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
1320 // CHECK3:       omp.inner.for.end:
1321 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1322 // CHECK3:       omp.loop.exit:
1323 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1324 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
1325 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
1326 // CHECK3-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
1327 // CHECK3-NEXT:    [[ARRAY_BEGIN5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1328 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN5]], i32 2
1329 // CHECK3-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1330 // CHECK3:       arraydestroy.body:
1331 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1332 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1333 // CHECK3-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1334 // CHECK3-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN5]]
1335 // CHECK3-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY]]
1336 // CHECK3:       arraydestroy.done6:
1337 // CHECK3-NEXT:    ret void
1338 //
1339 //
1340 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1341 // CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat {
1342 // CHECK3-NEXT:  entry:
1343 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1344 // CHECK3-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1345 // CHECK3-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1346 // CHECK3-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1347 // CHECK3-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1348 // CHECK3-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 4
1349 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1350 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 4
1351 // CHECK3-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
1352 // CHECK3-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1353 // CHECK3-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1354 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
1355 // CHECK3-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1356 // CHECK3-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
1357 // CHECK3-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i32 1
1358 // CHECK3-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
1359 // CHECK3-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 4
1360 // CHECK3-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 4
1361 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
1362 // CHECK3-NEXT:    [[TMP1:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
1363 // CHECK3-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
1364 // CHECK3-NEXT:    br i1 [[TMP2]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1365 // CHECK3:       omp_offload.failed:
1366 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56() #[[ATTR2]]
1367 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1368 // CHECK3:       omp_offload.cont:
1369 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1370 // CHECK3-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1371 // CHECK3-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
1372 // CHECK3-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1373 // CHECK3:       arraydestroy.body:
1374 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP3]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1375 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1376 // CHECK3-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1377 // CHECK3-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1378 // CHECK3-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
1379 // CHECK3:       arraydestroy.done2:
1380 // CHECK3-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
1381 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
1382 // CHECK3-NEXT:    ret i32 [[TMP4]]
1383 //
1384 //
1385 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1386 // CHECK3-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1387 // CHECK3-NEXT:  entry:
1388 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1389 // CHECK3-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1390 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1391 // CHECK3-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
1392 // CHECK3-NEXT:    ret void
1393 //
1394 //
1395 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1396 // CHECK3-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1397 // CHECK3-NEXT:  entry:
1398 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1399 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1400 // CHECK3-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1401 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1402 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1403 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1404 // CHECK3-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
1405 // CHECK3-NEXT:    ret void
1406 //
1407 //
1408 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56
1409 // CHECK3-SAME: () #[[ATTR4]] {
1410 // CHECK3-NEXT:  entry:
1411 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
1412 // CHECK3-NEXT:    ret void
1413 //
1414 //
1415 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
1416 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
1417 // CHECK3-NEXT:  entry:
1418 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1419 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1420 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1421 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1422 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 4
1423 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1424 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1425 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1426 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1427 // CHECK3-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1428 // CHECK3-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1429 // CHECK3-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1430 // CHECK3-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1431 // CHECK3-NEXT:    [[_TMP2:%.*]] = alloca %struct.S.0*, align 4
1432 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
1433 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1434 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1435 // CHECK3-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 4
1436 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1437 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1438 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1439 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1440 // CHECK3-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1441 // CHECK3-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
1442 // CHECK3-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1443 // CHECK3:       arrayctor.loop:
1444 // CHECK3-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1445 // CHECK3-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
1446 // CHECK3-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
1447 // CHECK3-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1448 // CHECK3-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1449 // CHECK3:       arrayctor.cont:
1450 // CHECK3-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
1451 // CHECK3-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP2]], align 4
1452 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1453 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1454 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1455 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1456 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
1457 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1458 // CHECK3:       cond.true:
1459 // CHECK3-NEXT:    br label [[COND_END:%.*]]
1460 // CHECK3:       cond.false:
1461 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1462 // CHECK3-NEXT:    br label [[COND_END]]
1463 // CHECK3:       cond.end:
1464 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1465 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1466 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1467 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1468 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1469 // CHECK3:       omp.inner.for.cond:
1470 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1471 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1472 // CHECK3-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1473 // CHECK3-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
1474 // CHECK3:       omp.inner.for.cond.cleanup:
1475 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
1476 // CHECK3:       omp.inner.for.body:
1477 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1478 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1479 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1480 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1481 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
1482 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
1483 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP9]]
1484 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
1485 // CHECK3-NEXT:    [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP2]], align 4
1486 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1487 // CHECK3-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 [[TMP11]]
1488 // CHECK3-NEXT:    [[TMP12:%.*]] = bitcast %struct.S.0* [[ARRAYIDX4]] to i8*
1489 // CHECK3-NEXT:    [[TMP13:%.*]] = bitcast %struct.S.0* [[TMP10]] to i8*
1490 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i32 4, i1 false)
1491 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1492 // CHECK3:       omp.body.continue:
1493 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1494 // CHECK3:       omp.inner.for.inc:
1495 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1496 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1
1497 // CHECK3-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
1498 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
1499 // CHECK3:       omp.inner.for.end:
1500 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1501 // CHECK3:       omp.loop.exit:
1502 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1503 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
1504 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
1505 // CHECK3-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
1506 // CHECK3-NEXT:    [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1507 // CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN6]], i32 2
1508 // CHECK3-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1509 // CHECK3:       arraydestroy.body:
1510 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1511 // CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1512 // CHECK3-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1513 // CHECK3-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
1514 // CHECK3-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
1515 // CHECK3:       arraydestroy.done7:
1516 // CHECK3-NEXT:    ret void
1517 //
1518 //
1519 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1520 // CHECK3-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1521 // CHECK3-NEXT:  entry:
1522 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1523 // CHECK3-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1524 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1525 // CHECK3-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
1526 // CHECK3-NEXT:    ret void
1527 //
1528 //
1529 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1530 // CHECK3-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1531 // CHECK3-NEXT:  entry:
1532 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1533 // CHECK3-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1534 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1535 // CHECK3-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1536 // CHECK3-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
1537 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
1538 // CHECK3-NEXT:    ret void
1539 //
1540 //
1541 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1542 // CHECK3-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1543 // CHECK3-NEXT:  entry:
1544 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1545 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1546 // CHECK3-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1547 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1548 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1549 // CHECK3-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1550 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1551 // CHECK3-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
1552 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
1553 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
1554 // CHECK3-NEXT:    ret void
1555 //
1556 //
1557 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1558 // CHECK3-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1559 // CHECK3-NEXT:  entry:
1560 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1561 // CHECK3-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1562 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1563 // CHECK3-NEXT:    ret void
1564 //
1565 //
1566 // CHECK3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_teams_distribute_private_codegen.cpp
1567 // CHECK3-SAME: () #[[ATTR0]] {
1568 // CHECK3-NEXT:  entry:
1569 // CHECK3-NEXT:    call void @__cxx_global_var_init()
1570 // CHECK3-NEXT:    call void @__cxx_global_var_init.1()
1571 // CHECK3-NEXT:    call void @__cxx_global_var_init.2()
1572 // CHECK3-NEXT:    ret void
1573 //
1574 //
1575 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1576 // CHECK3-SAME: () #[[ATTR0]] {
1577 // CHECK3-NEXT:  entry:
1578 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
1579 // CHECK3-NEXT:    ret void
1580 //
1581 //
1582 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init
1583 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
1584 // CHECK4-NEXT:  entry:
1585 // CHECK4-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) @test)
1586 // CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
1587 // CHECK4-NEXT:    ret void
1588 //
1589 //
1590 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
1591 // CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1592 // CHECK4-NEXT:  entry:
1593 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1594 // CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1595 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1596 // CHECK4-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
1597 // CHECK4-NEXT:    ret void
1598 //
1599 //
1600 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
1601 // CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1602 // CHECK4-NEXT:  entry:
1603 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1604 // CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1605 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1606 // CHECK4-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
1607 // CHECK4-NEXT:    ret void
1608 //
1609 //
1610 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1611 // CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1612 // CHECK4-NEXT:  entry:
1613 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1614 // CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1615 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1616 // CHECK4-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1617 // CHECK4-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
1618 // CHECK4-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
1619 // CHECK4-NEXT:    store float [[CONV]], float* [[F]], align 4
1620 // CHECK4-NEXT:    ret void
1621 //
1622 //
1623 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1624 // CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1625 // CHECK4-NEXT:  entry:
1626 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1627 // CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1628 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1629 // CHECK4-NEXT:    ret void
1630 //
1631 //
1632 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
1633 // CHECK4-SAME: () #[[ATTR0]] {
1634 // CHECK4-NEXT:  entry:
1635 // CHECK4-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), float 1.000000e+00)
1636 // CHECK4-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 1), float 2.000000e+00)
1637 // CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
1638 // CHECK4-NEXT:    ret void
1639 //
1640 //
1641 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1642 // CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1643 // CHECK4-NEXT:  entry:
1644 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1645 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1646 // CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1647 // CHECK4-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1648 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1649 // CHECK4-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1650 // CHECK4-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
1651 // CHECK4-NEXT:    ret void
1652 //
1653 //
1654 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
1655 // CHECK4-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] {
1656 // CHECK4-NEXT:  entry:
1657 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
1658 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
1659 // CHECK4-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1660 // CHECK4:       arraydestroy.body:
1661 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i32 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1662 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1663 // CHECK4-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1664 // CHECK4-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
1665 // CHECK4-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1666 // CHECK4:       arraydestroy.done1:
1667 // CHECK4-NEXT:    ret void
1668 //
1669 //
1670 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1671 // CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1672 // CHECK4-NEXT:  entry:
1673 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
1674 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1675 // CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
1676 // CHECK4-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1677 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
1678 // CHECK4-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1679 // CHECK4-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1680 // CHECK4-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
1681 // CHECK4-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
1682 // CHECK4-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
1683 // CHECK4-NEXT:    store float [[ADD]], float* [[F]], align 4
1684 // CHECK4-NEXT:    ret void
1685 //
1686 //
1687 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
1688 // CHECK4-SAME: () #[[ATTR0]] {
1689 // CHECK4-NEXT:  entry:
1690 // CHECK4-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @var, float 3.000000e+00)
1691 // CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
1692 // CHECK4-NEXT:    ret void
1693 //
1694 //
1695 // CHECK4-LABEL: define {{[^@]+}}@main
1696 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] {
1697 // CHECK4-NEXT:  entry:
1698 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1699 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1700 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1701 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
1702 // CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
1703 // CHECK4-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
1704 // CHECK4-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1705 // CHECK4:       omp_offload.failed:
1706 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91() #[[ATTR2]]
1707 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1708 // CHECK4:       omp_offload.cont:
1709 // CHECK4-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1710 // CHECK4-NEXT:    ret i32 [[CALL]]
1711 //
1712 //
1713 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l91
1714 // CHECK4-SAME: () #[[ATTR4:[0-9]+]] {
1715 // CHECK4-NEXT:  entry:
1716 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1717 // CHECK4-NEXT:    ret void
1718 //
1719 //
1720 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
1721 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
1722 // CHECK4-NEXT:  entry:
1723 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1724 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1725 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1726 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1727 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1728 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1729 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1730 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1731 // CHECK4-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1732 // CHECK4-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1733 // CHECK4-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
1734 // CHECK4-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1735 // CHECK4-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
1736 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
1737 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1738 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1739 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1740 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1741 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1742 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1743 // CHECK4-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1744 // CHECK4-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
1745 // CHECK4-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1746 // CHECK4:       arrayctor.loop:
1747 // CHECK4-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1748 // CHECK4-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
1749 // CHECK4-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
1750 // CHECK4-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1751 // CHECK4-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1752 // CHECK4:       arrayctor.cont:
1753 // CHECK4-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
1754 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1755 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1756 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1757 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1758 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
1759 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1760 // CHECK4:       cond.true:
1761 // CHECK4-NEXT:    br label [[COND_END:%.*]]
1762 // CHECK4:       cond.false:
1763 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1764 // CHECK4-NEXT:    br label [[COND_END]]
1765 // CHECK4:       cond.end:
1766 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1767 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1768 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1769 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1770 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1771 // CHECK4:       omp.inner.for.cond:
1772 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1773 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1774 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1775 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
1776 // CHECK4:       omp.inner.for.cond.cleanup:
1777 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
1778 // CHECK4:       omp.inner.for.body:
1779 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1780 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1781 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1782 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1783 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
1784 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
1785 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP9]]
1786 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
1787 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I]], align 4
1788 // CHECK4-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 [[TMP10]]
1789 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8*
1790 // CHECK4-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[VAR]] to i8*
1791 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP11]], i8* align 4 [[TMP12]], i32 4, i1 false)
1792 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4
1793 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[SIVAR]], align 4
1794 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
1795 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[SIVAR]], align 4
1796 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1797 // CHECK4:       omp.body.continue:
1798 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1799 // CHECK4:       omp.inner.for.inc:
1800 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1801 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
1802 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
1803 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
1804 // CHECK4:       omp.inner.for.end:
1805 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1806 // CHECK4:       omp.loop.exit:
1807 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1808 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
1809 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
1810 // CHECK4-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
1811 // CHECK4-NEXT:    [[ARRAY_BEGIN5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1812 // CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN5]], i32 2
1813 // CHECK4-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1814 // CHECK4:       arraydestroy.body:
1815 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1816 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1817 // CHECK4-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1818 // CHECK4-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN5]]
1819 // CHECK4-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY]]
1820 // CHECK4:       arraydestroy.done6:
1821 // CHECK4-NEXT:    ret void
1822 //
1823 //
1824 // CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1825 // CHECK4-SAME: () #[[ATTR6:[0-9]+]] comdat {
1826 // CHECK4-NEXT:  entry:
1827 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1828 // CHECK4-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1829 // CHECK4-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1830 // CHECK4-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1831 // CHECK4-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1832 // CHECK4-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 4
1833 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1834 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 4
1835 // CHECK4-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
1836 // CHECK4-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1837 // CHECK4-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1838 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
1839 // CHECK4-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1840 // CHECK4-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
1841 // CHECK4-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i32 1
1842 // CHECK4-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
1843 // CHECK4-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 4
1844 // CHECK4-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 4
1845 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
1846 // CHECK4-NEXT:    [[TMP1:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
1847 // CHECK4-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
1848 // CHECK4-NEXT:    br i1 [[TMP2]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1849 // CHECK4:       omp_offload.failed:
1850 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56() #[[ATTR2]]
1851 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1852 // CHECK4:       omp_offload.cont:
1853 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1854 // CHECK4-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1855 // CHECK4-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
1856 // CHECK4-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1857 // CHECK4:       arraydestroy.body:
1858 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP3]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1859 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1860 // CHECK4-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1861 // CHECK4-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1862 // CHECK4-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
1863 // CHECK4:       arraydestroy.done2:
1864 // CHECK4-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
1865 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
1866 // CHECK4-NEXT:    ret i32 [[TMP4]]
1867 //
1868 //
1869 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1870 // CHECK4-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1871 // CHECK4-NEXT:  entry:
1872 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1873 // CHECK4-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1874 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1875 // CHECK4-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
1876 // CHECK4-NEXT:    ret void
1877 //
1878 //
1879 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1880 // CHECK4-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1881 // CHECK4-NEXT:  entry:
1882 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1883 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1884 // CHECK4-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1885 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1886 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1887 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1888 // CHECK4-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
1889 // CHECK4-NEXT:    ret void
1890 //
1891 //
1892 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56
1893 // CHECK4-SAME: () #[[ATTR4]] {
1894 // CHECK4-NEXT:  entry:
1895 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
1896 // CHECK4-NEXT:    ret void
1897 //
1898 //
1899 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
1900 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
1901 // CHECK4-NEXT:  entry:
1902 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1903 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1904 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1905 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1906 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca %struct.S.0*, align 4
1907 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1908 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1909 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1910 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1911 // CHECK4-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1912 // CHECK4-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1913 // CHECK4-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1914 // CHECK4-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1915 // CHECK4-NEXT:    [[_TMP2:%.*]] = alloca %struct.S.0*, align 4
1916 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
1917 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1918 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1919 // CHECK4-NEXT:    store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 4
1920 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1921 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1922 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1923 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1924 // CHECK4-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1925 // CHECK4-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
1926 // CHECK4-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1927 // CHECK4:       arrayctor.loop:
1928 // CHECK4-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1929 // CHECK4-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
1930 // CHECK4-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
1931 // CHECK4-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1932 // CHECK4-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1933 // CHECK4:       arrayctor.cont:
1934 // CHECK4-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
1935 // CHECK4-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP2]], align 4
1936 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1937 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1938 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1939 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1940 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
1941 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1942 // CHECK4:       cond.true:
1943 // CHECK4-NEXT:    br label [[COND_END:%.*]]
1944 // CHECK4:       cond.false:
1945 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1946 // CHECK4-NEXT:    br label [[COND_END]]
1947 // CHECK4:       cond.end:
1948 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1949 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1950 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1951 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1952 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1953 // CHECK4:       omp.inner.for.cond:
1954 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1955 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1956 // CHECK4-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1957 // CHECK4-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
1958 // CHECK4:       omp.inner.for.cond.cleanup:
1959 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
1960 // CHECK4:       omp.inner.for.body:
1961 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1962 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1963 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1964 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1965 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
1966 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
1967 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP9]]
1968 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
1969 // CHECK4-NEXT:    [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP2]], align 4
1970 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1971 // CHECK4-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 [[TMP11]]
1972 // CHECK4-NEXT:    [[TMP12:%.*]] = bitcast %struct.S.0* [[ARRAYIDX4]] to i8*
1973 // CHECK4-NEXT:    [[TMP13:%.*]] = bitcast %struct.S.0* [[TMP10]] to i8*
1974 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i32 4, i1 false)
1975 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1976 // CHECK4:       omp.body.continue:
1977 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1978 // CHECK4:       omp.inner.for.inc:
1979 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1980 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1
1981 // CHECK4-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
1982 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
1983 // CHECK4:       omp.inner.for.end:
1984 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1985 // CHECK4:       omp.loop.exit:
1986 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1987 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
1988 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
1989 // CHECK4-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
1990 // CHECK4-NEXT:    [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1991 // CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN6]], i32 2
1992 // CHECK4-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1993 // CHECK4:       arraydestroy.body:
1994 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1995 // CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1996 // CHECK4-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1997 // CHECK4-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
1998 // CHECK4-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
1999 // CHECK4:       arraydestroy.done7:
2000 // CHECK4-NEXT:    ret void
2001 //
2002 //
2003 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
2004 // CHECK4-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2005 // CHECK4-NEXT:  entry:
2006 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
2007 // CHECK4-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
2008 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
2009 // CHECK4-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
2010 // CHECK4-NEXT:    ret void
2011 //
2012 //
2013 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
2014 // CHECK4-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2015 // CHECK4-NEXT:  entry:
2016 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
2017 // CHECK4-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
2018 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
2019 // CHECK4-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2020 // CHECK4-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
2021 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
2022 // CHECK4-NEXT:    ret void
2023 //
2024 //
2025 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
2026 // CHECK4-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2027 // CHECK4-NEXT:  entry:
2028 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
2029 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2030 // CHECK4-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
2031 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2032 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
2033 // CHECK4-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2034 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2035 // CHECK4-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
2036 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
2037 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
2038 // CHECK4-NEXT:    ret void
2039 //
2040 //
2041 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
2042 // CHECK4-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2043 // CHECK4-NEXT:  entry:
2044 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
2045 // CHECK4-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
2046 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
2047 // CHECK4-NEXT:    ret void
2048 //
2049 //
2050 // CHECK4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_teams_distribute_private_codegen.cpp
2051 // CHECK4-SAME: () #[[ATTR0]] {
2052 // CHECK4-NEXT:  entry:
2053 // CHECK4-NEXT:    call void @__cxx_global_var_init()
2054 // CHECK4-NEXT:    call void @__cxx_global_var_init.1()
2055 // CHECK4-NEXT:    call void @__cxx_global_var_init.2()
2056 // CHECK4-NEXT:    ret void
2057 //
2058 //
2059 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2060 // CHECK4-SAME: () #[[ATTR0]] {
2061 // CHECK4-NEXT:  entry:
2062 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
2063 // CHECK4-NEXT:    ret void
2064 //
2065 //
2066 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_var_init
2067 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
2068 // CHECK9-NEXT:  entry:
2069 // CHECK9-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) @test)
2070 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
2071 // CHECK9-NEXT:    ret void
2072 //
2073 //
2074 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
2075 // CHECK9-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
2076 // CHECK9-NEXT:  entry:
2077 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2078 // CHECK9-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2079 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2080 // CHECK9-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
2081 // CHECK9-NEXT:    ret void
2082 //
2083 //
2084 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
2085 // CHECK9-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2086 // CHECK9-NEXT:  entry:
2087 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2088 // CHECK9-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2089 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2090 // CHECK9-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
2091 // CHECK9-NEXT:    ret void
2092 //
2093 //
2094 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
2095 // CHECK9-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2096 // CHECK9-NEXT:  entry:
2097 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2098 // CHECK9-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2099 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2100 // CHECK9-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2101 // CHECK9-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
2102 // CHECK9-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
2103 // CHECK9-NEXT:    store float [[CONV]], float* [[F]], align 4
2104 // CHECK9-NEXT:    ret void
2105 //
2106 //
2107 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
2108 // CHECK9-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2109 // CHECK9-NEXT:  entry:
2110 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2111 // CHECK9-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2112 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2113 // CHECK9-NEXT:    ret void
2114 //
2115 //
2116 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
2117 // CHECK9-SAME: () #[[ATTR0]] {
2118 // CHECK9-NEXT:  entry:
2119 // CHECK9-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00)
2120 // CHECK9-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00)
2121 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
2122 // CHECK9-NEXT:    ret void
2123 //
2124 //
2125 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
2126 // CHECK9-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2127 // CHECK9-NEXT:  entry:
2128 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2129 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2130 // CHECK9-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2131 // CHECK9-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2132 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2133 // CHECK9-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2134 // CHECK9-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
2135 // CHECK9-NEXT:    ret void
2136 //
2137 //
2138 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
2139 // CHECK9-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] {
2140 // CHECK9-NEXT:  entry:
2141 // CHECK9-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2142 // CHECK9-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2143 // CHECK9-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2144 // CHECK9:       arraydestroy.body:
2145 // CHECK9-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2146 // CHECK9-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2147 // CHECK9-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
2148 // CHECK9-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
2149 // CHECK9-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
2150 // CHECK9:       arraydestroy.done1:
2151 // CHECK9-NEXT:    ret void
2152 //
2153 //
2154 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
2155 // CHECK9-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2156 // CHECK9-NEXT:  entry:
2157 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2158 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2159 // CHECK9-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2160 // CHECK9-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2161 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2162 // CHECK9-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2163 // CHECK9-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2164 // CHECK9-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
2165 // CHECK9-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
2166 // CHECK9-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
2167 // CHECK9-NEXT:    store float [[ADD]], float* [[F]], align 4
2168 // CHECK9-NEXT:    ret void
2169 //
2170 //
2171 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
2172 // CHECK9-SAME: () #[[ATTR0]] {
2173 // CHECK9-NEXT:  entry:
2174 // CHECK9-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @var, float 3.000000e+00)
2175 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
2176 // CHECK9-NEXT:    ret void
2177 //
2178 //
2179 // CHECK9-LABEL: define {{[^@]+}}@main
2180 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] {
2181 // CHECK9-NEXT:  entry:
2182 // CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2183 // CHECK9-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
2184 // CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2185 // CHECK9-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
2186 // CHECK9-NEXT:    ret i32 0
2187 //
2188 //
2189 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
2190 // CHECK9-SAME: () #[[ATTR5:[0-9]+]] {
2191 // CHECK9-NEXT:  entry:
2192 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2193 // CHECK9-NEXT:    ret void
2194 //
2195 //
2196 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
2197 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
2198 // CHECK9-NEXT:  entry:
2199 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2200 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2201 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2202 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2203 // CHECK9-NEXT:    [[_TMP1:%.*]] = alloca i32*, align 8
2204 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2205 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2206 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2207 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2208 // CHECK9-NEXT:    [[G:%.*]] = alloca i32, align 4
2209 // CHECK9-NEXT:    [[G1:%.*]] = alloca i32, align 4
2210 // CHECK9-NEXT:    [[_TMP2:%.*]] = alloca i32*, align 8
2211 // CHECK9-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
2212 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
2213 // CHECK9-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
2214 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2215 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2216 // CHECK9-NEXT:    store i32* undef, i32** [[_TMP1]], align 8
2217 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2218 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2219 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2220 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2221 // CHECK9-NEXT:    store i32* [[G1]], i32** [[_TMP2]], align 8
2222 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2223 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2224 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2225 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2226 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
2227 // CHECK9-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2228 // CHECK9:       cond.true:
2229 // CHECK9-NEXT:    br label [[COND_END:%.*]]
2230 // CHECK9:       cond.false:
2231 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2232 // CHECK9-NEXT:    br label [[COND_END]]
2233 // CHECK9:       cond.end:
2234 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2235 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2236 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2237 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2238 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2239 // CHECK9:       omp.inner.for.cond:
2240 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2241 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2242 // CHECK9-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2243 // CHECK9-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2244 // CHECK9:       omp.inner.for.body:
2245 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2246 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
2247 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2248 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2249 // CHECK9-NEXT:    store i32 1, i32* [[G]], align 4
2250 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[_TMP2]], align 8
2251 // CHECK9-NEXT:    store volatile i32 1, i32* [[TMP8]], align 4
2252 // CHECK9-NEXT:    store i32 2, i32* [[SIVAR]], align 4
2253 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2254 // CHECK9-NEXT:    store i32* [[G]], i32** [[TMP9]], align 8
2255 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
2256 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[_TMP2]], align 8
2257 // CHECK9-NEXT:    store i32* [[TMP11]], i32** [[TMP10]], align 8
2258 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
2259 // CHECK9-NEXT:    store i32* [[SIVAR]], i32** [[TMP12]], align 8
2260 // CHECK9-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(24) [[REF_TMP]])
2261 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2262 // CHECK9:       omp.body.continue:
2263 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2264 // CHECK9:       omp.inner.for.inc:
2265 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2266 // CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
2267 // CHECK9-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
2268 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
2269 // CHECK9:       omp.inner.for.end:
2270 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2271 // CHECK9:       omp.loop.exit:
2272 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2273 // CHECK9-NEXT:    ret void
2274 //
2275 //
2276 // CHECK9-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_teams_distribute_private_codegen.cpp
2277 // CHECK9-SAME: () #[[ATTR0]] {
2278 // CHECK9-NEXT:  entry:
2279 // CHECK9-NEXT:    call void @__cxx_global_var_init()
2280 // CHECK9-NEXT:    call void @__cxx_global_var_init.1()
2281 // CHECK9-NEXT:    call void @__cxx_global_var_init.2()
2282 // CHECK9-NEXT:    ret void
2283 //
2284 //
2285 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2286 // CHECK9-SAME: () #[[ATTR0]] {
2287 // CHECK9-NEXT:  entry:
2288 // CHECK9-NEXT:    call void @__tgt_register_requires(i64 1)
2289 // CHECK9-NEXT:    ret void
2290 //
2291 //
2292 // CHECK10-LABEL: define {{[^@]+}}@__cxx_global_var_init
2293 // CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
2294 // CHECK10-NEXT:  entry:
2295 // CHECK10-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) @test)
2296 // CHECK10-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
2297 // CHECK10-NEXT:    ret void
2298 //
2299 //
2300 // CHECK10-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
2301 // CHECK10-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
2302 // CHECK10-NEXT:  entry:
2303 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2304 // CHECK10-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2305 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2306 // CHECK10-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
2307 // CHECK10-NEXT:    ret void
2308 //
2309 //
2310 // CHECK10-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
2311 // CHECK10-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2312 // CHECK10-NEXT:  entry:
2313 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2314 // CHECK10-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2315 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2316 // CHECK10-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
2317 // CHECK10-NEXT:    ret void
2318 //
2319 //
2320 // CHECK10-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
2321 // CHECK10-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2322 // CHECK10-NEXT:  entry:
2323 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2324 // CHECK10-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2325 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2326 // CHECK10-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2327 // CHECK10-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
2328 // CHECK10-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
2329 // CHECK10-NEXT:    store float [[CONV]], float* [[F]], align 4
2330 // CHECK10-NEXT:    ret void
2331 //
2332 //
2333 // CHECK10-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
2334 // CHECK10-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2335 // CHECK10-NEXT:  entry:
2336 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2337 // CHECK10-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2338 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2339 // CHECK10-NEXT:    ret void
2340 //
2341 //
2342 // CHECK10-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
2343 // CHECK10-SAME: () #[[ATTR0]] {
2344 // CHECK10-NEXT:  entry:
2345 // CHECK10-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00)
2346 // CHECK10-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00)
2347 // CHECK10-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
2348 // CHECK10-NEXT:    ret void
2349 //
2350 //
2351 // CHECK10-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
2352 // CHECK10-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2353 // CHECK10-NEXT:  entry:
2354 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2355 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2356 // CHECK10-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2357 // CHECK10-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2358 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2359 // CHECK10-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2360 // CHECK10-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
2361 // CHECK10-NEXT:    ret void
2362 //
2363 //
2364 // CHECK10-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
2365 // CHECK10-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] {
2366 // CHECK10-NEXT:  entry:
2367 // CHECK10-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2368 // CHECK10-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2369 // CHECK10-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2370 // CHECK10:       arraydestroy.body:
2371 // CHECK10-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2372 // CHECK10-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2373 // CHECK10-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
2374 // CHECK10-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
2375 // CHECK10-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
2376 // CHECK10:       arraydestroy.done1:
2377 // CHECK10-NEXT:    ret void
2378 //
2379 //
2380 // CHECK10-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
2381 // CHECK10-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2382 // CHECK10-NEXT:  entry:
2383 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2384 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2385 // CHECK10-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2386 // CHECK10-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2387 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2388 // CHECK10-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2389 // CHECK10-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2390 // CHECK10-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
2391 // CHECK10-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
2392 // CHECK10-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
2393 // CHECK10-NEXT:    store float [[ADD]], float* [[F]], align 4
2394 // CHECK10-NEXT:    ret void
2395 //
2396 //
2397 // CHECK10-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
2398 // CHECK10-SAME: () #[[ATTR0]] {
2399 // CHECK10-NEXT:  entry:
2400 // CHECK10-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @var, float 3.000000e+00)
2401 // CHECK10-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
2402 // CHECK10-NEXT:    ret void
2403 //
2404 //
2405 // CHECK10-LABEL: define {{[^@]+}}@main
2406 // CHECK10-SAME: () #[[ATTR3:[0-9]+]] {
2407 // CHECK10-NEXT:  entry:
2408 // CHECK10-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2409 // CHECK10-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
2410 // CHECK10-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2411 // CHECK10-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
2412 // CHECK10-NEXT:    ret i32 0
2413 //
2414 //
2415 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
2416 // CHECK10-SAME: () #[[ATTR5:[0-9]+]] {
2417 // CHECK10-NEXT:  entry:
2418 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2419 // CHECK10-NEXT:    ret void
2420 //
2421 //
2422 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
2423 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
2424 // CHECK10-NEXT:  entry:
2425 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2426 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2427 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2428 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2429 // CHECK10-NEXT:    [[_TMP1:%.*]] = alloca i32*, align 8
2430 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2431 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2432 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2433 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2434 // CHECK10-NEXT:    [[G:%.*]] = alloca i32, align 4
2435 // CHECK10-NEXT:    [[G1:%.*]] = alloca i32, align 4
2436 // CHECK10-NEXT:    [[_TMP2:%.*]] = alloca i32*, align 8
2437 // CHECK10-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
2438 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
2439 // CHECK10-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
2440 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2441 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2442 // CHECK10-NEXT:    store i32* undef, i32** [[_TMP1]], align 8
2443 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2444 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2445 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2446 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2447 // CHECK10-NEXT:    store i32* [[G1]], i32** [[_TMP2]], align 8
2448 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2449 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2450 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2451 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2452 // CHECK10-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
2453 // CHECK10-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2454 // CHECK10:       cond.true:
2455 // CHECK10-NEXT:    br label [[COND_END:%.*]]
2456 // CHECK10:       cond.false:
2457 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2458 // CHECK10-NEXT:    br label [[COND_END]]
2459 // CHECK10:       cond.end:
2460 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2461 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2462 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2463 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2464 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2465 // CHECK10:       omp.inner.for.cond:
2466 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2467 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2468 // CHECK10-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2469 // CHECK10-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2470 // CHECK10:       omp.inner.for.body:
2471 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2472 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
2473 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2474 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2475 // CHECK10-NEXT:    store i32 1, i32* [[G]], align 4
2476 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[_TMP2]], align 8
2477 // CHECK10-NEXT:    store volatile i32 1, i32* [[TMP8]], align 4
2478 // CHECK10-NEXT:    store i32 2, i32* [[SIVAR]], align 4
2479 // CHECK10-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2480 // CHECK10-NEXT:    store i32* [[G]], i32** [[TMP9]], align 8
2481 // CHECK10-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
2482 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[_TMP2]], align 8
2483 // CHECK10-NEXT:    store i32* [[TMP11]], i32** [[TMP10]], align 8
2484 // CHECK10-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
2485 // CHECK10-NEXT:    store i32* [[SIVAR]], i32** [[TMP12]], align 8
2486 // CHECK10-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(24) [[REF_TMP]])
2487 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2488 // CHECK10:       omp.body.continue:
2489 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2490 // CHECK10:       omp.inner.for.inc:
2491 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2492 // CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
2493 // CHECK10-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
2494 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
2495 // CHECK10:       omp.inner.for.end:
2496 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2497 // CHECK10:       omp.loop.exit:
2498 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2499 // CHECK10-NEXT:    ret void
2500 //
2501 //
2502 // CHECK10-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_teams_distribute_private_codegen.cpp
2503 // CHECK10-SAME: () #[[ATTR0]] {
2504 // CHECK10-NEXT:  entry:
2505 // CHECK10-NEXT:    call void @__cxx_global_var_init()
2506 // CHECK10-NEXT:    call void @__cxx_global_var_init.1()
2507 // CHECK10-NEXT:    call void @__cxx_global_var_init.2()
2508 // CHECK10-NEXT:    ret void
2509 //
2510 //
2511 // CHECK10-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2512 // CHECK10-SAME: () #[[ATTR0]] {
2513 // CHECK10-NEXT:  entry:
2514 // CHECK10-NEXT:    call void @__tgt_register_requires(i64 1)
2515 // CHECK10-NEXT:    ret void
2516 //
2517 //