// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK4

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
14 #ifndef HEADER
15 #define HEADER
16 
// Global reduction target shared by the LAMBDA/BLOCKS test branches; also read
// by S's constructors so the test can observe ctor/dtor calls around reductions.
volatile double g;
18 
19 template <class T>
20 struct S {
21   T f;
SS22   S(T a) : f(a + g) {}
SS23   S() : f(g) {}
operator TS24   operator T() { return T(); }
operator &S25   S &operator&(const S &) { return *this; }
~SS26   ~S() {}
27 };
28 
29 
30 template <typename T>
tmain()31 T tmain() {
32   T t;
33   S<T> test;
34   T t_var = T(), t_var1;
35   T vec[] = {1, 2};
36   S<T> s_arr[] = {1, 2};
37   S<T> var(3), var1;
38 #pragma omp parallel
39 #pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
40   {
41     vec[0] = t_var;
42 #pragma omp section
43     s_arr[0] = var;
44   }
45   return T();
46 }
47 
main()48 int main() {
49 #ifdef LAMBDA
50   [&]() {
51 #pragma omp parallel
52 #pragma omp sections reduction(+:g)
53     {
54 
55     // Reduction list for runtime.
56 
57     g = 1;
58 
59 #pragma omp section
60     [&]() {
61       g = 2;
62     }();
63   }
64   }();
65   return 0;
66 #elif defined(BLOCKS)
67   ^{
68 #pragma omp parallel
69 #pragma omp sections reduction(-:g)
70     {
71 
72     // Reduction list for runtime.
73 
74     g = 1;
75 
76 #pragma omp section
77     ^{
78       g = 2;
79     }();
80   }
81   }();
82   return 0;
83 #else
84   S<float> test;
85   float t_var = 0, t_var1;
86   int vec[] = {1, 2};
87   S<float> s_arr[] = {1, 2};
88   S<float> var(3), var1;
89 #pragma omp parallel
90 #pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
91   {
92     {
93     vec[0] = t_var;
94     s_arr[0] = var;
95     vec[1] = t_var1;
96     s_arr[1] = var1;
97     }
98   }
99   return tmain<int>();
100 #endif
101 }
102 
103 
104 
105 
106 
107 
108 
109 
110 
111 // Reduction list for runtime.
112 
113 
114 
115 // For + reduction operation initial value of private variable is 0.
116 
117 // For & reduction operation initial value of private variable is ones in all bits.
118 
119 // For && reduction operation initial value of private variable is 1.0.
120 
121 // For min reduction operation initial value of private variable is largest repesentable value.
122 
123 // Skip checks for internal operations.
124 
125 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
126 
127 
128 // res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
129 
130 
131 // switch(res)
132 
133 // case 1:
134 // t_var += t_var_reduction;
135 
136 // var = var.operator &(var_reduction);
137 
138 // var1 = var1.operator &&(var1_reduction);
139 
140 // t_var1 = min(t_var1, t_var1_reduction);
141 
142 // __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
143 
144 // break;
145 
146 // case 2:
147 // t_var += t_var_reduction;
148 
149 // var = var.operator &(var_reduction);
150 
151 // var1 = var1.operator &&(var1_reduction);
152 
153 // t_var1 = min(t_var1, t_var1_reduction);
154 
155 // break;
156 
157 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
158 //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
159 //  ...
160 //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
161 //  *(Type<n>-1*)rhs[<n>-1]);
162 // }
163 // t_var_lhs = (i{{[0-9]+}}*)lhs[0];
164 // t_var_rhs = (i{{[0-9]+}}*)rhs[0];
165 
166 // var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
167 // var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
168 
169 // var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
170 // var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
171 
172 // t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
173 // t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
174 
175 // t_var_lhs += t_var_rhs;
176 
177 // var_lhs = var_lhs.operator &(var_rhs);
178 
179 // var1_lhs = var1_lhs.operator &&(var1_rhs);
180 
181 // t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
182 
183 #endif
184 // CHECK1-LABEL: define {{[^@]+}}@main
185 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
186 // CHECK1-NEXT:  entry:
187 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
188 // CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
189 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca float, align 4
190 // CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca float, align 4
191 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
192 // CHECK1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
193 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
194 // CHECK1-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
195 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
196 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
197 // CHECK1-NEXT:    store float 0.000000e+00, float* [[T_VAR]], align 4
198 // CHECK1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
199 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
200 // CHECK1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
201 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
202 // CHECK1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
203 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
204 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
205 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
206 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float*, %struct.S*, %struct.S*, float*, [2 x i32]*, [2 x %struct.S]*)* @.omp_outlined. to void (i32*, i32*, ...)*), float* [[T_VAR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S]* [[S_ARR]])
207 // CHECK1-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
208 // CHECK1-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
209 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4:[0-9]+]]
210 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
211 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
212 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
213 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
214 // CHECK1:       arraydestroy.body:
215 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
216 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
217 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
218 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
219 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
220 // CHECK1:       arraydestroy.done1:
221 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
222 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
223 // CHECK1-NEXT:    ret i32 [[TMP2]]
224 //
225 //
226 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
227 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
228 // CHECK1-NEXT:  entry:
229 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
230 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
231 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
232 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
233 // CHECK1-NEXT:    ret void
234 //
235 //
236 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
237 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
238 // CHECK1-NEXT:  entry:
239 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
240 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
241 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
242 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
243 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
244 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
245 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
246 // CHECK1-NEXT:    ret void
247 //
248 //
249 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
250 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3:[0-9]+]] {
251 // CHECK1-NEXT:  entry:
252 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
253 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
254 // CHECK1-NEXT:    [[T_VAR_ADDR:%.*]] = alloca float*, align 8
255 // CHECK1-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
256 // CHECK1-NEXT:    [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
257 // CHECK1-NEXT:    [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
258 // CHECK1-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
259 // CHECK1-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
260 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
261 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
262 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
263 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
264 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
265 // CHECK1-NEXT:    [[T_VAR2:%.*]] = alloca float, align 4
266 // CHECK1-NEXT:    [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
267 // CHECK1-NEXT:    [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
268 // CHECK1-NEXT:    [[T_VAR15:%.*]] = alloca float, align 4
269 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
270 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
271 // CHECK1-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
272 // CHECK1-NEXT:    [[TMP:%.*]] = alloca float, align 4
273 // CHECK1-NEXT:    [[REF_TMP17:%.*]] = alloca [[STRUCT_S]], align 4
274 // CHECK1-NEXT:    [[ATOMIC_TEMP27:%.*]] = alloca float, align 4
275 // CHECK1-NEXT:    [[_TMP28:%.*]] = alloca float, align 4
276 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
277 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
278 // CHECK1-NEXT:    store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
279 // CHECK1-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
280 // CHECK1-NEXT:    store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
281 // CHECK1-NEXT:    store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
282 // CHECK1-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
283 // CHECK1-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
284 // CHECK1-NEXT:    [[TMP0:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
285 // CHECK1-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
286 // CHECK1-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
287 // CHECK1-NEXT:    [[TMP3:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
288 // CHECK1-NEXT:    [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
289 // CHECK1-NEXT:    [[TMP5:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
290 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
291 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
292 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
293 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
294 // CHECK1-NEXT:    store float 0.000000e+00, float* [[T_VAR2]], align 4
295 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
296 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
297 // CHECK1-NEXT:    store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
298 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
299 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
300 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
301 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
302 // CHECK1-NEXT:    [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 0
303 // CHECK1-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 0
304 // CHECK1-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
305 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
306 // CHECK1-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
307 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
308 // CHECK1:       omp.inner.for.cond:
309 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
310 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
311 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
312 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
313 // CHECK1:       omp.inner.for.body:
314 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
315 // CHECK1-NEXT:    switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
316 // CHECK1-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
317 // CHECK1-NEXT:    ]
318 // CHECK1:       .omp.sections.case:
319 // CHECK1-NEXT:    [[TMP15:%.*]] = load float, float* [[T_VAR2]], align 4
320 // CHECK1-NEXT:    [[CONV:%.*]] = fptosi float [[TMP15]] to i32
321 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
322 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
323 // CHECK1-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 0
324 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
325 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
326 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
327 // CHECK1-NEXT:    [[TMP18:%.*]] = load float, float* [[T_VAR15]], align 4
328 // CHECK1-NEXT:    [[CONV7:%.*]] = fptosi float [[TMP18]] to i32
329 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 1
330 // CHECK1-NEXT:    store i32 [[CONV7]], i32* [[ARRAYIDX8]], align 4
331 // CHECK1-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 1
332 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
333 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
334 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false)
335 // CHECK1-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
336 // CHECK1:       .omp.sections.exit:
337 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
338 // CHECK1:       omp.inner.for.inc:
339 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
340 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP21]], 1
341 // CHECK1-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
342 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
343 // CHECK1:       omp.inner.for.end:
344 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
345 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
346 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast float* [[T_VAR2]] to i8*
347 // CHECK1-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
348 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
349 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
350 // CHECK1-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
351 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
352 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
353 // CHECK1-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
354 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
355 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast float* [[T_VAR15]] to i8*
356 // CHECK1-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
357 // CHECK1-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
358 // CHECK1-NEXT:    [[TMP31:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP30]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
359 // CHECK1-NEXT:    switch i32 [[TMP31]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
360 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
361 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
362 // CHECK1-NEXT:    ]
363 // CHECK1:       .omp.reduction.case1:
364 // CHECK1-NEXT:    [[TMP32:%.*]] = load float, float* [[TMP0]], align 4
365 // CHECK1-NEXT:    [[TMP33:%.*]] = load float, float* [[T_VAR2]], align 4
366 // CHECK1-NEXT:    [[ADD:%.*]] = fadd float [[TMP32]], [[TMP33]]
367 // CHECK1-NEXT:    store float [[ADD]], float* [[TMP0]], align 4
368 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
369 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
370 // CHECK1-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[CALL]] to i8*
371 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
372 // CHECK1-NEXT:    [[CALL10:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
373 // CHECK1-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL10]], 0.000000e+00
374 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
375 // CHECK1:       land.rhs:
376 // CHECK1-NEXT:    [[CALL11:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
377 // CHECK1-NEXT:    [[TOBOOL12:%.*]] = fcmp une float [[CALL11]], 0.000000e+00
378 // CHECK1-NEXT:    br label [[LAND_END]]
379 // CHECK1:       land.end:
380 // CHECK1-NEXT:    [[TMP36:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL12]], [[LAND_RHS]] ]
381 // CHECK1-NEXT:    [[CONV13:%.*]] = uitofp i1 [[TMP36]] to float
382 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV13]])
383 // CHECK1-NEXT:    [[TMP37:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
384 // CHECK1-NEXT:    [[TMP38:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
385 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP37]], i8* align 4 [[TMP38]], i64 4, i1 false)
386 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
387 // CHECK1-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP3]], align 4
388 // CHECK1-NEXT:    [[TMP40:%.*]] = load float, float* [[T_VAR15]], align 4
389 // CHECK1-NEXT:    [[CMP14:%.*]] = fcmp olt float [[TMP39]], [[TMP40]]
390 // CHECK1-NEXT:    br i1 [[CMP14]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
391 // CHECK1:       cond.true:
392 // CHECK1-NEXT:    [[TMP41:%.*]] = load float, float* [[TMP3]], align 4
393 // CHECK1-NEXT:    br label [[COND_END:%.*]]
394 // CHECK1:       cond.false:
395 // CHECK1-NEXT:    [[TMP42:%.*]] = load float, float* [[T_VAR15]], align 4
396 // CHECK1-NEXT:    br label [[COND_END]]
397 // CHECK1:       cond.end:
398 // CHECK1-NEXT:    [[COND:%.*]] = phi float [ [[TMP41]], [[COND_TRUE]] ], [ [[TMP42]], [[COND_FALSE]] ]
399 // CHECK1-NEXT:    store float [[COND]], float* [[TMP3]], align 4
400 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
401 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
402 // CHECK1:       .omp.reduction.case2:
403 // CHECK1-NEXT:    [[TMP43:%.*]] = load float, float* [[T_VAR2]], align 4
404 // CHECK1-NEXT:    [[TMP44:%.*]] = bitcast float* [[TMP0]] to i32*
405 // CHECK1-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP44]] monotonic, align 4
406 // CHECK1-NEXT:    br label [[ATOMIC_CONT:%.*]]
407 // CHECK1:       atomic_cont:
408 // CHECK1-NEXT:    [[TMP45:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP53:%.*]], [[ATOMIC_CONT]] ]
409 // CHECK1-NEXT:    [[TMP46:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
410 // CHECK1-NEXT:    [[TMP47:%.*]] = bitcast i32 [[TMP45]] to float
411 // CHECK1-NEXT:    store float [[TMP47]], float* [[TMP]], align 4
412 // CHECK1-NEXT:    [[TMP48:%.*]] = load float, float* [[TMP]], align 4
413 // CHECK1-NEXT:    [[TMP49:%.*]] = load float, float* [[T_VAR2]], align 4
414 // CHECK1-NEXT:    [[ADD15:%.*]] = fadd float [[TMP48]], [[TMP49]]
415 // CHECK1-NEXT:    store float [[ADD15]], float* [[ATOMIC_TEMP]], align 4
416 // CHECK1-NEXT:    [[TMP50:%.*]] = load i32, i32* [[TMP46]], align 4
417 // CHECK1-NEXT:    [[TMP51:%.*]] = bitcast float* [[TMP0]] to i32*
418 // CHECK1-NEXT:    [[TMP52:%.*]] = cmpxchg i32* [[TMP51]], i32 [[TMP45]], i32 [[TMP50]] monotonic monotonic, align 4
419 // CHECK1-NEXT:    [[TMP53]] = extractvalue { i32, i1 } [[TMP52]], 0
420 // CHECK1-NEXT:    [[TMP54:%.*]] = extractvalue { i32, i1 } [[TMP52]], 1
421 // CHECK1-NEXT:    br i1 [[TMP54]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
422 // CHECK1:       atomic_exit:
423 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
424 // CHECK1-NEXT:    [[CALL16:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
425 // CHECK1-NEXT:    [[TMP55:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
426 // CHECK1-NEXT:    [[TMP56:%.*]] = bitcast %struct.S* [[CALL16]] to i8*
427 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP55]], i8* align 4 [[TMP56]], i64 4, i1 false)
428 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
429 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
430 // CHECK1-NEXT:    [[CALL18:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
431 // CHECK1-NEXT:    [[TOBOOL19:%.*]] = fcmp une float [[CALL18]], 0.000000e+00
432 // CHECK1-NEXT:    br i1 [[TOBOOL19]], label [[LAND_RHS20:%.*]], label [[LAND_END23:%.*]]
433 // CHECK1:       land.rhs20:
434 // CHECK1-NEXT:    [[CALL21:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
435 // CHECK1-NEXT:    [[TOBOOL22:%.*]] = fcmp une float [[CALL21]], 0.000000e+00
436 // CHECK1-NEXT:    br label [[LAND_END23]]
437 // CHECK1:       land.end23:
438 // CHECK1-NEXT:    [[TMP57:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL22]], [[LAND_RHS20]] ]
439 // CHECK1-NEXT:    [[CONV24:%.*]] = uitofp i1 [[TMP57]] to float
440 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]], float [[CONV24]])
441 // CHECK1-NEXT:    [[TMP58:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
442 // CHECK1-NEXT:    [[TMP59:%.*]] = bitcast %struct.S* [[REF_TMP17]] to i8*
443 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP58]], i8* align 4 [[TMP59]], i64 4, i1 false)
444 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]]) #[[ATTR4]]
445 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
446 // CHECK1-NEXT:    [[TMP60:%.*]] = load float, float* [[T_VAR15]], align 4
447 // CHECK1-NEXT:    [[TMP61:%.*]] = bitcast float* [[TMP3]] to i32*
448 // CHECK1-NEXT:    [[ATOMIC_LOAD25:%.*]] = load atomic i32, i32* [[TMP61]] monotonic, align 4
449 // CHECK1-NEXT:    br label [[ATOMIC_CONT26:%.*]]
450 // CHECK1:       atomic_cont26:
451 // CHECK1-NEXT:    [[TMP62:%.*]] = phi i32 [ [[ATOMIC_LOAD25]], [[LAND_END23]] ], [ [[TMP72:%.*]], [[COND_END32:%.*]] ]
452 // CHECK1-NEXT:    [[TMP63:%.*]] = bitcast float* [[ATOMIC_TEMP27]] to i32*
453 // CHECK1-NEXT:    [[TMP64:%.*]] = bitcast i32 [[TMP62]] to float
454 // CHECK1-NEXT:    store float [[TMP64]], float* [[_TMP28]], align 4
455 // CHECK1-NEXT:    [[TMP65:%.*]] = load float, float* [[_TMP28]], align 4
456 // CHECK1-NEXT:    [[TMP66:%.*]] = load float, float* [[T_VAR15]], align 4
457 // CHECK1-NEXT:    [[CMP29:%.*]] = fcmp olt float [[TMP65]], [[TMP66]]
458 // CHECK1-NEXT:    br i1 [[CMP29]], label [[COND_TRUE30:%.*]], label [[COND_FALSE31:%.*]]
459 // CHECK1:       cond.true30:
460 // CHECK1-NEXT:    [[TMP67:%.*]] = load float, float* [[_TMP28]], align 4
461 // CHECK1-NEXT:    br label [[COND_END32]]
462 // CHECK1:       cond.false31:
463 // CHECK1-NEXT:    [[TMP68:%.*]] = load float, float* [[T_VAR15]], align 4
464 // CHECK1-NEXT:    br label [[COND_END32]]
465 // CHECK1:       cond.end32:
466 // CHECK1-NEXT:    [[COND33:%.*]] = phi float [ [[TMP67]], [[COND_TRUE30]] ], [ [[TMP68]], [[COND_FALSE31]] ]
467 // CHECK1-NEXT:    store float [[COND33]], float* [[ATOMIC_TEMP27]], align 4
468 // CHECK1-NEXT:    [[TMP69:%.*]] = load i32, i32* [[TMP63]], align 4
469 // CHECK1-NEXT:    [[TMP70:%.*]] = bitcast float* [[TMP3]] to i32*
470 // CHECK1-NEXT:    [[TMP71:%.*]] = cmpxchg i32* [[TMP70]], i32 [[TMP62]], i32 [[TMP69]] monotonic monotonic, align 4
471 // CHECK1-NEXT:    [[TMP72]] = extractvalue { i32, i1 } [[TMP71]], 0
472 // CHECK1-NEXT:    [[TMP73:%.*]] = extractvalue { i32, i1 } [[TMP71]], 1
473 // CHECK1-NEXT:    br i1 [[TMP73]], label [[ATOMIC_EXIT34:%.*]], label [[ATOMIC_CONT26]]
474 // CHECK1:       atomic_exit34:
475 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
476 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
477 // CHECK1:       .omp.reduction.default:
478 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
479 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
480 // CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP7]])
481 // CHECK1-NEXT:    ret void
482 //
483 //
484 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
485 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
486 // CHECK1-NEXT:  entry:
487 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
488 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
489 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
490 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
491 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
492 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
493 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
494 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
495 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
496 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
497 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
498 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
499 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
500 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
501 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
502 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
503 // CHECK1-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
504 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
505 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
506 // CHECK1-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
507 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
508 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
509 // CHECK1-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
510 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
511 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
512 // CHECK1-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
513 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
514 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
515 // CHECK1-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
516 // CHECK1-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
517 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
518 // CHECK1-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
519 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
520 // CHECK1-NEXT:    [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
521 // CHECK1-NEXT:    [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
522 // CHECK1-NEXT:    [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
523 // CHECK1-NEXT:    store float [[ADD]], float* [[TMP11]], align 4
524 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
525 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
526 // CHECK1-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
527 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
528 // CHECK1-NEXT:    [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
529 // CHECK1-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
530 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
531 // CHECK1:       land.rhs:
532 // CHECK1-NEXT:    [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
533 // CHECK1-NEXT:    [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
534 // CHECK1-NEXT:    br label [[LAND_END]]
535 // CHECK1:       land.end:
536 // CHECK1-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
537 // CHECK1-NEXT:    [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
538 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
539 // CHECK1-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
540 // CHECK1-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
541 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
542 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
543 // CHECK1-NEXT:    [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
544 // CHECK1-NEXT:    [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
545 // CHECK1-NEXT:    [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
546 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
547 // CHECK1:       cond.true:
548 // CHECK1-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
549 // CHECK1-NEXT:    br label [[COND_END:%.*]]
550 // CHECK1:       cond.false:
551 // CHECK1-NEXT:    [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
552 // CHECK1-NEXT:    br label [[COND_END]]
553 // CHECK1:       cond.end:
554 // CHECK1-NEXT:    [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
555 // CHECK1-NEXT:    store float [[COND]], float* [[TMP29]], align 4
556 // CHECK1-NEXT:    ret void
557 //
558 //
559 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
560 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] align 2 {
561 // CHECK1-NEXT:  entry:
562 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
563 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
564 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
565 // CHECK1-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
566 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
567 // CHECK1-NEXT:    ret %struct.S* [[THIS1]]
568 //
569 //
570 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
571 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
572 // CHECK1-NEXT:  entry:
573 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
574 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
575 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
576 // CHECK1-NEXT:    ret float 0.000000e+00
577 //
578 //
579 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
580 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
581 // CHECK1-NEXT:  entry:
582 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
583 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
584 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
585 // CHECK1-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
586 // CHECK1-NEXT:    ret void
587 //
588 //
589 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
590 // CHECK1-SAME: () #[[ATTR6]] {
591 // CHECK1-NEXT:  entry:
592 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
593 // CHECK1-NEXT:    [[T:%.*]] = alloca i32, align 4
594 // CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
595 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
596 // CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
597 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
598 // CHECK1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
599 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
600 // CHECK1-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
601 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
602 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
603 // CHECK1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
604 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
605 // CHECK1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
606 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
607 // CHECK1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
608 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
609 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
610 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
611 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.S.0*, %struct.S.0*, i32*, [2 x i32]*, [2 x %struct.S.0]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR]], %struct.S.0* [[VAR]], %struct.S.0* [[VAR1]], i32* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]])
612 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
613 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
614 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
615 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
616 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
617 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
618 // CHECK1:       arraydestroy.body:
619 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
620 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
621 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
622 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
623 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
624 // CHECK1:       arraydestroy.done1:
625 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
626 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
627 // CHECK1-NEXT:    ret i32 [[TMP2]]
628 //
629 //
630 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
631 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
632 // CHECK1-NEXT:  entry:
633 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
634 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
635 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
636 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
637 // CHECK1-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
638 // CHECK1-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP0]] to float
639 // CHECK1-NEXT:    store float [[CONV]], float* [[F]], align 4
640 // CHECK1-NEXT:    ret void
641 //
642 //
643 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
644 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
645 // CHECK1-NEXT:  entry:
646 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
647 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
648 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
649 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
650 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
651 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
652 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
653 // CHECK1-NEXT:    [[CONV:%.*]] = fpext float [[TMP0]] to double
654 // CHECK1-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
655 // CHECK1-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
656 // CHECK1-NEXT:    [[CONV2:%.*]] = fptrunc double [[ADD]] to float
657 // CHECK1-NEXT:    store float [[CONV2]], float* [[F]], align 4
658 // CHECK1-NEXT:    ret void
659 //
660 //
661 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
662 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
663 // CHECK1-NEXT:  entry:
664 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
665 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
666 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
667 // CHECK1-NEXT:    ret void
668 //
669 //
670 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
671 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
672 // CHECK1-NEXT:  entry:
673 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
674 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
675 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
676 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
677 // CHECK1-NEXT:    ret void
678 //
679 //
680 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
681 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
682 // CHECK1-NEXT:  entry:
683 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
684 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
685 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
686 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
687 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
688 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
689 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
690 // CHECK1-NEXT:    ret void
691 //
692 //
693 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
694 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3]] {
695 // CHECK1-NEXT:  entry:
696 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
697 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
698 // CHECK1-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
699 // CHECK1-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
700 // CHECK1-NEXT:    [[VAR1_ADDR:%.*]] = alloca %struct.S.0*, align 8
701 // CHECK1-NEXT:    [[T_VAR1_ADDR:%.*]] = alloca i32*, align 8
702 // CHECK1-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
703 // CHECK1-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
704 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
705 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
706 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
707 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
708 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
709 // CHECK1-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
710 // CHECK1-NEXT:    [[VAR3:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
711 // CHECK1-NEXT:    [[VAR14:%.*]] = alloca [[STRUCT_S_0]], align 4
712 // CHECK1-NEXT:    [[T_VAR15:%.*]] = alloca i32, align 4
713 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
714 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
715 // CHECK1-NEXT:    [[REF_TMP13:%.*]] = alloca [[STRUCT_S_0]], align 4
716 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
717 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
718 // CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
719 // CHECK1-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
720 // CHECK1-NEXT:    store %struct.S.0* [[VAR1]], %struct.S.0** [[VAR1_ADDR]], align 8
721 // CHECK1-NEXT:    store i32* [[T_VAR1]], i32** [[T_VAR1_ADDR]], align 8
722 // CHECK1-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
723 // CHECK1-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
724 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
725 // CHECK1-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
726 // CHECK1-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR1_ADDR]], align 8
727 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[T_VAR1_ADDR]], align 8
728 // CHECK1-NEXT:    [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
729 // CHECK1-NEXT:    [[TMP5:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
730 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
731 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
732 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
733 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
734 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR2]], align 4
735 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
736 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
737 // CHECK1-NEXT:    store i32 2147483647, i32* [[T_VAR15]], align 4
738 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
739 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
740 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
741 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
742 // CHECK1-NEXT:    [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 1
743 // CHECK1-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 1
744 // CHECK1-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
745 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
746 // CHECK1-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
747 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
748 // CHECK1:       omp.inner.for.cond:
749 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
750 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
751 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
752 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
753 // CHECK1:       omp.inner.for.body:
754 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
755 // CHECK1-NEXT:    switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
756 // CHECK1-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
757 // CHECK1-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE6:%.*]]
758 // CHECK1-NEXT:    ]
759 // CHECK1:       .omp.sections.case:
760 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[T_VAR2]], align 4
761 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
762 // CHECK1-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4
763 // CHECK1-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
764 // CHECK1:       .omp.sections.case6:
765 // CHECK1-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP5]], i64 0, i64 0
766 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
767 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
768 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
769 // CHECK1-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
770 // CHECK1:       .omp.sections.exit:
771 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
772 // CHECK1:       omp.inner.for.inc:
773 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
774 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP18]], 1
775 // CHECK1-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
776 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
777 // CHECK1:       omp.inner.for.end:
778 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
779 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
780 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i32* [[T_VAR2]] to i8*
781 // CHECK1-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
782 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
783 // CHECK1-NEXT:    [[TMP22:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
784 // CHECK1-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
785 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
786 // CHECK1-NEXT:    [[TMP24:%.*]] = bitcast %struct.S.0* [[VAR14]] to i8*
787 // CHECK1-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
788 // CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
789 // CHECK1-NEXT:    [[TMP26:%.*]] = bitcast i32* [[T_VAR15]] to i8*
790 // CHECK1-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
791 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
792 // CHECK1-NEXT:    [[TMP28:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP27]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
793 // CHECK1-NEXT:    switch i32 [[TMP28]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
794 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
795 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
796 // CHECK1-NEXT:    ]
797 // CHECK1:       .omp.reduction.case1:
798 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP0]], align 4
799 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[T_VAR2]], align 4
800 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
801 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
802 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
803 // CHECK1-NEXT:    [[TMP31:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
804 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
805 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i64 4, i1 false)
806 // CHECK1-NEXT:    [[CALL8:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
807 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL8]], 0
808 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
809 // CHECK1:       land.rhs:
810 // CHECK1-NEXT:    [[CALL9:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
811 // CHECK1-NEXT:    [[TOBOOL10:%.*]] = icmp ne i32 [[CALL9]], 0
812 // CHECK1-NEXT:    br label [[LAND_END]]
813 // CHECK1:       land.end:
814 // CHECK1-NEXT:    [[TMP33:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL10]], [[LAND_RHS]] ]
815 // CHECK1-NEXT:    [[CONV:%.*]] = zext i1 [[TMP33]] to i32
816 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
817 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
818 // CHECK1-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
819 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
820 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
821 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP3]], align 4
822 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[T_VAR15]], align 4
823 // CHECK1-NEXT:    [[CMP11:%.*]] = icmp slt i32 [[TMP36]], [[TMP37]]
824 // CHECK1-NEXT:    br i1 [[CMP11]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
825 // CHECK1:       cond.true:
826 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP3]], align 4
827 // CHECK1-NEXT:    br label [[COND_END:%.*]]
828 // CHECK1:       cond.false:
829 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[T_VAR15]], align 4
830 // CHECK1-NEXT:    br label [[COND_END]]
831 // CHECK1:       cond.end:
832 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP38]], [[COND_TRUE]] ], [ [[TMP39]], [[COND_FALSE]] ]
833 // CHECK1-NEXT:    store i32 [[COND]], i32* [[TMP3]], align 4
834 // CHECK1-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
835 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
836 // CHECK1:       .omp.reduction.case2:
837 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[T_VAR2]], align 4
838 // CHECK1-NEXT:    [[TMP41:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP40]] monotonic, align 4
839 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
840 // CHECK1-NEXT:    [[CALL12:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
841 // CHECK1-NEXT:    [[TMP42:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
842 // CHECK1-NEXT:    [[TMP43:%.*]] = bitcast %struct.S.0* [[CALL12]] to i8*
843 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 4, i1 false)
844 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
845 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
846 // CHECK1-NEXT:    [[CALL14:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
847 // CHECK1-NEXT:    [[TOBOOL15:%.*]] = icmp ne i32 [[CALL14]], 0
848 // CHECK1-NEXT:    br i1 [[TOBOOL15]], label [[LAND_RHS16:%.*]], label [[LAND_END19:%.*]]
849 // CHECK1:       land.rhs16:
850 // CHECK1-NEXT:    [[CALL17:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
851 // CHECK1-NEXT:    [[TOBOOL18:%.*]] = icmp ne i32 [[CALL17]], 0
852 // CHECK1-NEXT:    br label [[LAND_END19]]
853 // CHECK1:       land.end19:
854 // CHECK1-NEXT:    [[TMP44:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL18]], [[LAND_RHS16]] ]
855 // CHECK1-NEXT:    [[CONV20:%.*]] = zext i1 [[TMP44]] to i32
856 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]], i32 [[CONV20]])
857 // CHECK1-NEXT:    [[TMP45:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
858 // CHECK1-NEXT:    [[TMP46:%.*]] = bitcast %struct.S.0* [[REF_TMP13]] to i8*
859 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP45]], i8* align 4 [[TMP46]], i64 4, i1 false)
860 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]]) #[[ATTR4]]
861 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
862 // CHECK1-NEXT:    [[TMP47:%.*]] = load i32, i32* [[T_VAR15]], align 4
863 // CHECK1-NEXT:    [[TMP48:%.*]] = atomicrmw min i32* [[TMP3]], i32 [[TMP47]] monotonic, align 4
864 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
865 // CHECK1:       .omp.reduction.default:
866 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
867 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
868 // CHECK1-NEXT:    ret void
869 //
870 //
871 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
872 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
873 // CHECK1-NEXT:  entry:
874 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
875 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
876 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
877 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
878 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
879 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
880 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
881 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
882 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
883 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
884 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
885 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
886 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
887 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
888 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
889 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
890 // CHECK1-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
891 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
892 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
893 // CHECK1-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
894 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
895 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
896 // CHECK1-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
897 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
898 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
899 // CHECK1-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
900 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
901 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
902 // CHECK1-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
903 // CHECK1-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
904 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
905 // CHECK1-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
906 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
907 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 4
908 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 4
909 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
910 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
911 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
912 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
913 // CHECK1-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
914 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
915 // CHECK1-NEXT:    [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
916 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
917 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
918 // CHECK1:       land.rhs:
919 // CHECK1-NEXT:    [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
920 // CHECK1-NEXT:    [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
921 // CHECK1-NEXT:    br label [[LAND_END]]
922 // CHECK1:       land.end:
923 // CHECK1-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
924 // CHECK1-NEXT:    [[CONV:%.*]] = zext i1 [[TMP34]] to i32
925 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
926 // CHECK1-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
927 // CHECK1-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
928 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
929 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
930 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 4
931 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 4
932 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
933 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
934 // CHECK1:       cond.true:
935 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 4
936 // CHECK1-NEXT:    br label [[COND_END:%.*]]
937 // CHECK1:       cond.false:
938 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 4
939 // CHECK1-NEXT:    br label [[COND_END]]
940 // CHECK1:       cond.end:
941 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
942 // CHECK1-NEXT:    store i32 [[COND]], i32* [[TMP29]], align 4
943 // CHECK1-NEXT:    ret void
944 //
945 //
946 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
947 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6]] align 2 {
948 // CHECK1-NEXT:  entry:
949 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
950 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
951 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
952 // CHECK1-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
953 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
954 // CHECK1-NEXT:    ret %struct.S.0* [[THIS1]]
955 //
956 //
957 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
958 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
959 // CHECK1-NEXT:  entry:
960 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
961 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
962 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
963 // CHECK1-NEXT:    ret i32 0
964 //
965 //
966 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
967 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
968 // CHECK1-NEXT:  entry:
969 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
970 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
971 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
972 // CHECK1-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
973 // CHECK1-NEXT:    ret void
974 //
975 //
976 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
977 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
978 // CHECK1-NEXT:  entry:
979 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
980 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
981 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
982 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
983 // CHECK1-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
984 // CHECK1-NEXT:    [[CONV:%.*]] = fptosi double [[TMP0]] to i32
985 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[F]], align 4
986 // CHECK1-NEXT:    ret void
987 //
988 //
989 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
990 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
991 // CHECK1-NEXT:  entry:
992 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
993 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
994 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
995 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
996 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
997 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
998 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
999 // CHECK1-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to double
1000 // CHECK1-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
1001 // CHECK1-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
1002 // CHECK1-NEXT:    [[CONV2:%.*]] = fptosi double [[ADD]] to i32
1003 // CHECK1-NEXT:    store i32 [[CONV2]], i32* [[F]], align 4
1004 // CHECK1-NEXT:    ret void
1005 //
1006 //
1007 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1008 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1009 // CHECK1-NEXT:  entry:
1010 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1011 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1012 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1013 // CHECK1-NEXT:    ret void
1014 //
1015 //
1016 // CHECK2-LABEL: define {{[^@]+}}@main
1017 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
1018 // CHECK2-NEXT:  entry:
1019 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1020 // CHECK2-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1021 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca float, align 4
1022 // CHECK2-NEXT:    [[T_VAR1:%.*]] = alloca float, align 4
1023 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1024 // CHECK2-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
1025 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
1026 // CHECK2-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
1027 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1028 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
1029 // CHECK2-NEXT:    store float 0.000000e+00, float* [[T_VAR]], align 4
1030 // CHECK2-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1031 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
1032 // CHECK2-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1033 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
1034 // CHECK2-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
1035 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
1036 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
1037 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
1038 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float*, %struct.S*, %struct.S*, float*, [2 x i32]*, [2 x %struct.S]*)* @.omp_outlined. to void (i32*, i32*, ...)*), float* [[T_VAR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S]* [[S_ARR]])
1039 // CHECK2-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1040 // CHECK2-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
1041 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4:[0-9]+]]
1042 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1043 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1044 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1045 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1046 // CHECK2:       arraydestroy.body:
1047 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1048 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1049 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1050 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1051 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1052 // CHECK2:       arraydestroy.done1:
1053 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1054 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
1055 // CHECK2-NEXT:    ret i32 [[TMP2]]
1056 //
1057 //
1058 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
1059 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
1060 // CHECK2-NEXT:  entry:
1061 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1062 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1063 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1064 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
1065 // CHECK2-NEXT:    ret void
1066 //
1067 //
1068 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1069 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1070 // CHECK2-NEXT:  entry:
1071 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1072 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1073 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1074 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1075 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1076 // CHECK2-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1077 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
1078 // CHECK2-NEXT:    ret void
1079 //
1080 //
1081 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
1082 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3:[0-9]+]] {
1083 // CHECK2-NEXT:  entry:
1084 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1085 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1086 // CHECK2-NEXT:    [[T_VAR_ADDR:%.*]] = alloca float*, align 8
1087 // CHECK2-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
1088 // CHECK2-NEXT:    [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
1089 // CHECK2-NEXT:    [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
1090 // CHECK2-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
1091 // CHECK2-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
1092 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
1093 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
1094 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
1095 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
1096 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
1097 // CHECK2-NEXT:    [[T_VAR2:%.*]] = alloca float, align 4
1098 // CHECK2-NEXT:    [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1099 // CHECK2-NEXT:    [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
1100 // CHECK2-NEXT:    [[T_VAR15:%.*]] = alloca float, align 4
1101 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
1102 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
1103 // CHECK2-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
1104 // CHECK2-NEXT:    [[TMP:%.*]] = alloca float, align 4
1105 // CHECK2-NEXT:    [[REF_TMP17:%.*]] = alloca [[STRUCT_S]], align 4
1106 // CHECK2-NEXT:    [[ATOMIC_TEMP27:%.*]] = alloca float, align 4
1107 // CHECK2-NEXT:    [[_TMP28:%.*]] = alloca float, align 4
1108 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1109 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1110 // CHECK2-NEXT:    store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
1111 // CHECK2-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
1112 // CHECK2-NEXT:    store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
1113 // CHECK2-NEXT:    store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
1114 // CHECK2-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
1115 // CHECK2-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1116 // CHECK2-NEXT:    [[TMP0:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
1117 // CHECK2-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
1118 // CHECK2-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
1119 // CHECK2-NEXT:    [[TMP3:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
1120 // CHECK2-NEXT:    [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
1121 // CHECK2-NEXT:    [[TMP5:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1122 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1123 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1124 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
1125 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
1126 // CHECK2-NEXT:    store float 0.000000e+00, float* [[T_VAR2]], align 4
1127 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
1128 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
1129 // CHECK2-NEXT:    store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
1130 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1131 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
1132 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
1133 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1134 // CHECK2-NEXT:    [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 0
1135 // CHECK2-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 0
1136 // CHECK2-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
1137 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1138 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1139 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1140 // CHECK2:       omp.inner.for.cond:
1141 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1142 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1143 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
1144 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1145 // CHECK2:       omp.inner.for.body:
1146 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1147 // CHECK2-NEXT:    switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
1148 // CHECK2-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
1149 // CHECK2-NEXT:    ]
1150 // CHECK2:       .omp.sections.case:
1151 // CHECK2-NEXT:    [[TMP15:%.*]] = load float, float* [[T_VAR2]], align 4
1152 // CHECK2-NEXT:    [[CONV:%.*]] = fptosi float [[TMP15]] to i32
1153 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
1154 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
1155 // CHECK2-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 0
1156 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
1157 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
1158 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
1159 // CHECK2-NEXT:    [[TMP18:%.*]] = load float, float* [[T_VAR15]], align 4
1160 // CHECK2-NEXT:    [[CONV7:%.*]] = fptosi float [[TMP18]] to i32
1161 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 1
1162 // CHECK2-NEXT:    store i32 [[CONV7]], i32* [[ARRAYIDX8]], align 4
1163 // CHECK2-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 1
1164 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
1165 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
1166 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false)
1167 // CHECK2-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1168 // CHECK2:       .omp.sections.exit:
1169 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1170 // CHECK2:       omp.inner.for.inc:
1171 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1172 // CHECK2-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP21]], 1
1173 // CHECK2-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1174 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
1175 // CHECK2:       omp.inner.for.end:
1176 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
1177 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1178 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast float* [[T_VAR2]] to i8*
1179 // CHECK2-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
1180 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
1181 // CHECK2-NEXT:    [[TMP25:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
1182 // CHECK2-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
1183 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
1184 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
1185 // CHECK2-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
1186 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
1187 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast float* [[T_VAR15]] to i8*
1188 // CHECK2-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
1189 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1190 // CHECK2-NEXT:    [[TMP31:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP30]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1191 // CHECK2-NEXT:    switch i32 [[TMP31]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1192 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1193 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1194 // CHECK2-NEXT:    ]
1195 // CHECK2:       .omp.reduction.case1:
1196 // CHECK2-NEXT:    [[TMP32:%.*]] = load float, float* [[TMP0]], align 4
1197 // CHECK2-NEXT:    [[TMP33:%.*]] = load float, float* [[T_VAR2]], align 4
1198 // CHECK2-NEXT:    [[ADD:%.*]] = fadd float [[TMP32]], [[TMP33]]
1199 // CHECK2-NEXT:    store float [[ADD]], float* [[TMP0]], align 4
1200 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
1201 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
1202 // CHECK2-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[CALL]] to i8*
1203 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
1204 // CHECK2-NEXT:    [[CALL10:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
1205 // CHECK2-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL10]], 0.000000e+00
1206 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1207 // CHECK2:       land.rhs:
1208 // CHECK2-NEXT:    [[CALL11:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
1209 // CHECK2-NEXT:    [[TOBOOL12:%.*]] = fcmp une float [[CALL11]], 0.000000e+00
1210 // CHECK2-NEXT:    br label [[LAND_END]]
1211 // CHECK2:       land.end:
1212 // CHECK2-NEXT:    [[TMP36:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL12]], [[LAND_RHS]] ]
1213 // CHECK2-NEXT:    [[CONV13:%.*]] = uitofp i1 [[TMP36]] to float
1214 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV13]])
1215 // CHECK2-NEXT:    [[TMP37:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
1216 // CHECK2-NEXT:    [[TMP38:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
1217 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP37]], i8* align 4 [[TMP38]], i64 4, i1 false)
1218 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1219 // CHECK2-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP3]], align 4
1220 // CHECK2-NEXT:    [[TMP40:%.*]] = load float, float* [[T_VAR15]], align 4
1221 // CHECK2-NEXT:    [[CMP14:%.*]] = fcmp olt float [[TMP39]], [[TMP40]]
1222 // CHECK2-NEXT:    br i1 [[CMP14]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1223 // CHECK2:       cond.true:
1224 // CHECK2-NEXT:    [[TMP41:%.*]] = load float, float* [[TMP3]], align 4
1225 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1226 // CHECK2:       cond.false:
1227 // CHECK2-NEXT:    [[TMP42:%.*]] = load float, float* [[T_VAR15]], align 4
1228 // CHECK2-NEXT:    br label [[COND_END]]
1229 // CHECK2:       cond.end:
1230 // CHECK2-NEXT:    [[COND:%.*]] = phi float [ [[TMP41]], [[COND_TRUE]] ], [ [[TMP42]], [[COND_FALSE]] ]
1231 // CHECK2-NEXT:    store float [[COND]], float* [[TMP3]], align 4
1232 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1233 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1234 // CHECK2:       .omp.reduction.case2:
1235 // CHECK2-NEXT:    [[TMP43:%.*]] = load float, float* [[T_VAR2]], align 4
1236 // CHECK2-NEXT:    [[TMP44:%.*]] = bitcast float* [[TMP0]] to i32*
1237 // CHECK2-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP44]] monotonic, align 4
1238 // CHECK2-NEXT:    br label [[ATOMIC_CONT:%.*]]
1239 // CHECK2:       atomic_cont:
1240 // CHECK2-NEXT:    [[TMP45:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP53:%.*]], [[ATOMIC_CONT]] ]
1241 // CHECK2-NEXT:    [[TMP46:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
1242 // CHECK2-NEXT:    [[TMP47:%.*]] = bitcast i32 [[TMP45]] to float
1243 // CHECK2-NEXT:    store float [[TMP47]], float* [[TMP]], align 4
1244 // CHECK2-NEXT:    [[TMP48:%.*]] = load float, float* [[TMP]], align 4
1245 // CHECK2-NEXT:    [[TMP49:%.*]] = load float, float* [[T_VAR2]], align 4
1246 // CHECK2-NEXT:    [[ADD15:%.*]] = fadd float [[TMP48]], [[TMP49]]
1247 // CHECK2-NEXT:    store float [[ADD15]], float* [[ATOMIC_TEMP]], align 4
1248 // CHECK2-NEXT:    [[TMP50:%.*]] = load i32, i32* [[TMP46]], align 4
1249 // CHECK2-NEXT:    [[TMP51:%.*]] = bitcast float* [[TMP0]] to i32*
1250 // CHECK2-NEXT:    [[TMP52:%.*]] = cmpxchg i32* [[TMP51]], i32 [[TMP45]], i32 [[TMP50]] monotonic monotonic, align 4
1251 // CHECK2-NEXT:    [[TMP53]] = extractvalue { i32, i1 } [[TMP52]], 0
1252 // CHECK2-NEXT:    [[TMP54:%.*]] = extractvalue { i32, i1 } [[TMP52]], 1
1253 // CHECK2-NEXT:    br i1 [[TMP54]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
1254 // CHECK2:       atomic_exit:
1255 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1256 // CHECK2-NEXT:    [[CALL16:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
1257 // CHECK2-NEXT:    [[TMP55:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
1258 // CHECK2-NEXT:    [[TMP56:%.*]] = bitcast %struct.S* [[CALL16]] to i8*
1259 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP55]], i8* align 4 [[TMP56]], i64 4, i1 false)
1260 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1261 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1262 // CHECK2-NEXT:    [[CALL18:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
1263 // CHECK2-NEXT:    [[TOBOOL19:%.*]] = fcmp une float [[CALL18]], 0.000000e+00
1264 // CHECK2-NEXT:    br i1 [[TOBOOL19]], label [[LAND_RHS20:%.*]], label [[LAND_END23:%.*]]
1265 // CHECK2:       land.rhs20:
1266 // CHECK2-NEXT:    [[CALL21:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
1267 // CHECK2-NEXT:    [[TOBOOL22:%.*]] = fcmp une float [[CALL21]], 0.000000e+00
1268 // CHECK2-NEXT:    br label [[LAND_END23]]
1269 // CHECK2:       land.end23:
1270 // CHECK2-NEXT:    [[TMP57:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL22]], [[LAND_RHS20]] ]
1271 // CHECK2-NEXT:    [[CONV24:%.*]] = uitofp i1 [[TMP57]] to float
1272 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]], float [[CONV24]])
1273 // CHECK2-NEXT:    [[TMP58:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
1274 // CHECK2-NEXT:    [[TMP59:%.*]] = bitcast %struct.S* [[REF_TMP17]] to i8*
1275 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP58]], i8* align 4 [[TMP59]], i64 4, i1 false)
1276 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]]) #[[ATTR4]]
1277 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1278 // CHECK2-NEXT:    [[TMP60:%.*]] = load float, float* [[T_VAR15]], align 4
1279 // CHECK2-NEXT:    [[TMP61:%.*]] = bitcast float* [[TMP3]] to i32*
1280 // CHECK2-NEXT:    [[ATOMIC_LOAD25:%.*]] = load atomic i32, i32* [[TMP61]] monotonic, align 4
1281 // CHECK2-NEXT:    br label [[ATOMIC_CONT26:%.*]]
1282 // CHECK2:       atomic_cont26:
1283 // CHECK2-NEXT:    [[TMP62:%.*]] = phi i32 [ [[ATOMIC_LOAD25]], [[LAND_END23]] ], [ [[TMP72:%.*]], [[COND_END32:%.*]] ]
1284 // CHECK2-NEXT:    [[TMP63:%.*]] = bitcast float* [[ATOMIC_TEMP27]] to i32*
1285 // CHECK2-NEXT:    [[TMP64:%.*]] = bitcast i32 [[TMP62]] to float
1286 // CHECK2-NEXT:    store float [[TMP64]], float* [[_TMP28]], align 4
1287 // CHECK2-NEXT:    [[TMP65:%.*]] = load float, float* [[_TMP28]], align 4
1288 // CHECK2-NEXT:    [[TMP66:%.*]] = load float, float* [[T_VAR15]], align 4
1289 // CHECK2-NEXT:    [[CMP29:%.*]] = fcmp olt float [[TMP65]], [[TMP66]]
1290 // CHECK2-NEXT:    br i1 [[CMP29]], label [[COND_TRUE30:%.*]], label [[COND_FALSE31:%.*]]
1291 // CHECK2:       cond.true30:
1292 // CHECK2-NEXT:    [[TMP67:%.*]] = load float, float* [[_TMP28]], align 4
1293 // CHECK2-NEXT:    br label [[COND_END32]]
1294 // CHECK2:       cond.false31:
1295 // CHECK2-NEXT:    [[TMP68:%.*]] = load float, float* [[T_VAR15]], align 4
1296 // CHECK2-NEXT:    br label [[COND_END32]]
1297 // CHECK2:       cond.end32:
1298 // CHECK2-NEXT:    [[COND33:%.*]] = phi float [ [[TMP67]], [[COND_TRUE30]] ], [ [[TMP68]], [[COND_FALSE31]] ]
1299 // CHECK2-NEXT:    store float [[COND33]], float* [[ATOMIC_TEMP27]], align 4
1300 // CHECK2-NEXT:    [[TMP69:%.*]] = load i32, i32* [[TMP63]], align 4
1301 // CHECK2-NEXT:    [[TMP70:%.*]] = bitcast float* [[TMP3]] to i32*
1302 // CHECK2-NEXT:    [[TMP71:%.*]] = cmpxchg i32* [[TMP70]], i32 [[TMP62]], i32 [[TMP69]] monotonic monotonic, align 4
1303 // CHECK2-NEXT:    [[TMP72]] = extractvalue { i32, i1 } [[TMP71]], 0
1304 // CHECK2-NEXT:    [[TMP73:%.*]] = extractvalue { i32, i1 } [[TMP71]], 1
1305 // CHECK2-NEXT:    br i1 [[TMP73]], label [[ATOMIC_EXIT34:%.*]], label [[ATOMIC_CONT26]]
1306 // CHECK2:       atomic_exit34:
1307 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1308 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1309 // CHECK2:       .omp.reduction.default:
1310 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
1311 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
1312 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP7]])
1313 // CHECK2-NEXT:    ret void
1314 //
1315 //
1316 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
1317 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
1318 // CHECK2-NEXT:  entry:
1319 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1320 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1321 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1322 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1323 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1324 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1325 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
1326 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1327 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
1328 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
1329 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1330 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
1331 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
1332 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1333 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
1334 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
1335 // CHECK2-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
1336 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
1337 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
1338 // CHECK2-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
1339 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
1340 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
1341 // CHECK2-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
1342 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
1343 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
1344 // CHECK2-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
1345 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
1346 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
1347 // CHECK2-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
1348 // CHECK2-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
1349 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
1350 // CHECK2-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
1351 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
1352 // CHECK2-NEXT:    [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
1353 // CHECK2-NEXT:    [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
1354 // CHECK2-NEXT:    [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
1355 // CHECK2-NEXT:    store float [[ADD]], float* [[TMP11]], align 4
1356 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
1357 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
1358 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
1359 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
1360 // CHECK2-NEXT:    [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
1361 // CHECK2-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
1362 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1363 // CHECK2:       land.rhs:
1364 // CHECK2-NEXT:    [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
1365 // CHECK2-NEXT:    [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
1366 // CHECK2-NEXT:    br label [[LAND_END]]
1367 // CHECK2:       land.end:
1368 // CHECK2-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
1369 // CHECK2-NEXT:    [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
1370 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
1371 // CHECK2-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
1372 // CHECK2-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
1373 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
1374 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1375 // CHECK2-NEXT:    [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
1376 // CHECK2-NEXT:    [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
1377 // CHECK2-NEXT:    [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
1378 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1379 // CHECK2:       cond.true:
1380 // CHECK2-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
1381 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1382 // CHECK2:       cond.false:
1383 // CHECK2-NEXT:    [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
1384 // CHECK2-NEXT:    br label [[COND_END]]
1385 // CHECK2:       cond.end:
1386 // CHECK2-NEXT:    [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
1387 // CHECK2-NEXT:    store float [[COND]], float* [[TMP29]], align 4
1388 // CHECK2-NEXT:    ret void
1389 //
1390 //
1391 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
1392 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] align 2 {
1393 // CHECK2-NEXT:  entry:
1394 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1395 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
1396 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1397 // CHECK2-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
1398 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1399 // CHECK2-NEXT:    ret %struct.S* [[THIS1]]
1400 //
1401 //
1402 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
1403 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
1404 // CHECK2-NEXT:  entry:
1405 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1406 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1407 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1408 // CHECK2-NEXT:    ret float 0.000000e+00
1409 //
1410 //
1411 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
1412 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1413 // CHECK2-NEXT:  entry:
1414 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1415 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1416 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1417 // CHECK2-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1418 // CHECK2-NEXT:    ret void
1419 //
1420 //
1421 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1422 // CHECK2-SAME: () #[[ATTR6]] {
1423 // CHECK2-NEXT:  entry:
1424 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1425 // CHECK2-NEXT:    [[T:%.*]] = alloca i32, align 4
1426 // CHECK2-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1427 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1428 // CHECK2-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
1429 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1430 // CHECK2-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1431 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
1432 // CHECK2-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
1433 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
1434 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1435 // CHECK2-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1436 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
1437 // CHECK2-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
1438 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
1439 // CHECK2-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
1440 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
1441 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
1442 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
1443 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.S.0*, %struct.S.0*, i32*, [2 x i32]*, [2 x %struct.S.0]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR]], %struct.S.0* [[VAR]], %struct.S.0* [[VAR1]], i32* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]])
1444 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1445 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
1446 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1447 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1448 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
1449 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1450 // CHECK2:       arraydestroy.body:
1451 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1452 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1453 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1454 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1455 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1456 // CHECK2:       arraydestroy.done1:
1457 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1458 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
1459 // CHECK2-NEXT:    ret i32 [[TMP2]]
1460 //
1461 //
1462 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1463 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1464 // CHECK2-NEXT:  entry:
1465 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1466 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1467 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1468 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1469 // CHECK2-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
1470 // CHECK2-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP0]] to float
1471 // CHECK2-NEXT:    store float [[CONV]], float* [[F]], align 4
1472 // CHECK2-NEXT:    ret void
1473 //
1474 //
1475 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1476 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1477 // CHECK2-NEXT:  entry:
1478 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1479 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1480 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1481 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1482 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1483 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1484 // CHECK2-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1485 // CHECK2-NEXT:    [[CONV:%.*]] = fpext float [[TMP0]] to double
1486 // CHECK2-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
1487 // CHECK2-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
1488 // CHECK2-NEXT:    [[CONV2:%.*]] = fptrunc double [[ADD]] to float
1489 // CHECK2-NEXT:    store float [[CONV2]], float* [[F]], align 4
1490 // CHECK2-NEXT:    ret void
1491 //
1492 //
1493 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1494 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1495 // CHECK2-NEXT:  entry:
1496 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1497 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1498 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1499 // CHECK2-NEXT:    ret void
1500 //
1501 //
1502 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1503 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1504 // CHECK2-NEXT:  entry:
1505 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1506 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1507 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1508 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
1509 // CHECK2-NEXT:    ret void
1510 //
1511 //
1512 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1513 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1514 // CHECK2-NEXT:  entry:
1515 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1516 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1517 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1518 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1519 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1520 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1521 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
1522 // CHECK2-NEXT:    ret void
1523 //
1524 //
1525 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
1526 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3]] {
1527 // CHECK2-NEXT:  entry:
1528 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1529 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1530 // CHECK2-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
1531 // CHECK2-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
1532 // CHECK2-NEXT:    [[VAR1_ADDR:%.*]] = alloca %struct.S.0*, align 8
1533 // CHECK2-NEXT:    [[T_VAR1_ADDR:%.*]] = alloca i32*, align 8
1534 // CHECK2-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
1535 // CHECK2-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
1536 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
1537 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
1538 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
1539 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
1540 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
1541 // CHECK2-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
1542 // CHECK2-NEXT:    [[VAR3:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1543 // CHECK2-NEXT:    [[VAR14:%.*]] = alloca [[STRUCT_S_0]], align 4
1544 // CHECK2-NEXT:    [[T_VAR15:%.*]] = alloca i32, align 4
1545 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
1546 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
1547 // CHECK2-NEXT:    [[REF_TMP13:%.*]] = alloca [[STRUCT_S_0]], align 4
1548 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1549 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1550 // CHECK2-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
1551 // CHECK2-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
1552 // CHECK2-NEXT:    store %struct.S.0* [[VAR1]], %struct.S.0** [[VAR1_ADDR]], align 8
1553 // CHECK2-NEXT:    store i32* [[T_VAR1]], i32** [[T_VAR1_ADDR]], align 8
1554 // CHECK2-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
1555 // CHECK2-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
1556 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
1557 // CHECK2-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
1558 // CHECK2-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR1_ADDR]], align 8
1559 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[T_VAR1_ADDR]], align 8
1560 // CHECK2-NEXT:    [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
1561 // CHECK2-NEXT:    [[TMP5:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
1562 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1563 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1564 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
1565 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
1566 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR2]], align 4
1567 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
1568 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
1569 // CHECK2-NEXT:    store i32 2147483647, i32* [[T_VAR15]], align 4
1570 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1571 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
1572 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
1573 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1574 // CHECK2-NEXT:    [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 1
1575 // CHECK2-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 1
1576 // CHECK2-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
1577 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1578 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1579 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1580 // CHECK2:       omp.inner.for.cond:
1581 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1582 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1583 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
1584 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1585 // CHECK2:       omp.inner.for.body:
1586 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1587 // CHECK2-NEXT:    switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
1588 // CHECK2-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
1589 // CHECK2-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE6:%.*]]
1590 // CHECK2-NEXT:    ]
1591 // CHECK2:       .omp.sections.case:
1592 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[T_VAR2]], align 4
1593 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
1594 // CHECK2-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4
1595 // CHECK2-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1596 // CHECK2:       .omp.sections.case6:
1597 // CHECK2-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP5]], i64 0, i64 0
1598 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
1599 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
1600 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
1601 // CHECK2-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1602 // CHECK2:       .omp.sections.exit:
1603 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1604 // CHECK2:       omp.inner.for.inc:
1605 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1606 // CHECK2-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP18]], 1
1607 // CHECK2-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1608 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
1609 // CHECK2:       omp.inner.for.end:
1610 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
1611 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1612 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i32* [[T_VAR2]] to i8*
1613 // CHECK2-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
1614 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
1615 // CHECK2-NEXT:    [[TMP22:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
1616 // CHECK2-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
1617 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
1618 // CHECK2-NEXT:    [[TMP24:%.*]] = bitcast %struct.S.0* [[VAR14]] to i8*
1619 // CHECK2-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
1620 // CHECK2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
1621 // CHECK2-NEXT:    [[TMP26:%.*]] = bitcast i32* [[T_VAR15]] to i8*
1622 // CHECK2-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
1623 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1624 // CHECK2-NEXT:    [[TMP28:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP27]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
1625 // CHECK2-NEXT:    switch i32 [[TMP28]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1626 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1627 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1628 // CHECK2-NEXT:    ]
1629 // CHECK2:       .omp.reduction.case1:
1630 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP0]], align 4
1631 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[T_VAR2]], align 4
1632 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
1633 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
1634 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
1635 // CHECK2-NEXT:    [[TMP31:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
1636 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
1637 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i64 4, i1 false)
1638 // CHECK2-NEXT:    [[CALL8:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
1639 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL8]], 0
1640 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1641 // CHECK2:       land.rhs:
1642 // CHECK2-NEXT:    [[CALL9:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
1643 // CHECK2-NEXT:    [[TOBOOL10:%.*]] = icmp ne i32 [[CALL9]], 0
1644 // CHECK2-NEXT:    br label [[LAND_END]]
1645 // CHECK2:       land.end:
1646 // CHECK2-NEXT:    [[TMP33:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL10]], [[LAND_RHS]] ]
1647 // CHECK2-NEXT:    [[CONV:%.*]] = zext i1 [[TMP33]] to i32
1648 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
1649 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
1650 // CHECK2-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
1651 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
1652 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1653 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP3]], align 4
1654 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[T_VAR15]], align 4
1655 // CHECK2-NEXT:    [[CMP11:%.*]] = icmp slt i32 [[TMP36]], [[TMP37]]
1656 // CHECK2-NEXT:    br i1 [[CMP11]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1657 // CHECK2:       cond.true:
1658 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP3]], align 4
1659 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1660 // CHECK2:       cond.false:
1661 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[T_VAR15]], align 4
1662 // CHECK2-NEXT:    br label [[COND_END]]
1663 // CHECK2:       cond.end:
1664 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP38]], [[COND_TRUE]] ], [ [[TMP39]], [[COND_FALSE]] ]
1665 // CHECK2-NEXT:    store i32 [[COND]], i32* [[TMP3]], align 4
1666 // CHECK2-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1667 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1668 // CHECK2:       .omp.reduction.case2:
1669 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[T_VAR2]], align 4
1670 // CHECK2-NEXT:    [[TMP41:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP40]] monotonic, align 4
1671 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1672 // CHECK2-NEXT:    [[CALL12:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
1673 // CHECK2-NEXT:    [[TMP42:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
1674 // CHECK2-NEXT:    [[TMP43:%.*]] = bitcast %struct.S.0* [[CALL12]] to i8*
1675 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 4, i1 false)
1676 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1677 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1678 // CHECK2-NEXT:    [[CALL14:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
1679 // CHECK2-NEXT:    [[TOBOOL15:%.*]] = icmp ne i32 [[CALL14]], 0
1680 // CHECK2-NEXT:    br i1 [[TOBOOL15]], label [[LAND_RHS16:%.*]], label [[LAND_END19:%.*]]
1681 // CHECK2:       land.rhs16:
1682 // CHECK2-NEXT:    [[CALL17:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
1683 // CHECK2-NEXT:    [[TOBOOL18:%.*]] = icmp ne i32 [[CALL17]], 0
1684 // CHECK2-NEXT:    br label [[LAND_END19]]
1685 // CHECK2:       land.end19:
1686 // CHECK2-NEXT:    [[TMP44:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL18]], [[LAND_RHS16]] ]
1687 // CHECK2-NEXT:    [[CONV20:%.*]] = zext i1 [[TMP44]] to i32
1688 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]], i32 [[CONV20]])
1689 // CHECK2-NEXT:    [[TMP45:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
1690 // CHECK2-NEXT:    [[TMP46:%.*]] = bitcast %struct.S.0* [[REF_TMP13]] to i8*
1691 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP45]], i8* align 4 [[TMP46]], i64 4, i1 false)
1692 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]]) #[[ATTR4]]
1693 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1694 // CHECK2-NEXT:    [[TMP47:%.*]] = load i32, i32* [[T_VAR15]], align 4
1695 // CHECK2-NEXT:    [[TMP48:%.*]] = atomicrmw min i32* [[TMP3]], i32 [[TMP47]] monotonic, align 4
1696 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1697 // CHECK2:       .omp.reduction.default:
1698 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
1699 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
1700 // CHECK2-NEXT:    ret void
1701 //
1702 //
1703 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
1704 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
1705 // CHECK2-NEXT:  entry:
1706 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1707 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1708 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1709 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1710 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1711 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1712 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
1713 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1714 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
1715 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
1716 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1717 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1718 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
1719 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1720 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1721 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
1722 // CHECK2-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
1723 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
1724 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
1725 // CHECK2-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
1726 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
1727 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
1728 // CHECK2-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
1729 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
1730 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
1731 // CHECK2-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
1732 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
1733 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
1734 // CHECK2-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
1735 // CHECK2-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
1736 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
1737 // CHECK2-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
1738 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
1739 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 4
1740 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 4
1741 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
1742 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1743 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
1744 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
1745 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
1746 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
1747 // CHECK2-NEXT:    [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
1748 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
1749 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1750 // CHECK2:       land.rhs:
1751 // CHECK2-NEXT:    [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
1752 // CHECK2-NEXT:    [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
1753 // CHECK2-NEXT:    br label [[LAND_END]]
1754 // CHECK2:       land.end:
1755 // CHECK2-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
1756 // CHECK2-NEXT:    [[CONV:%.*]] = zext i1 [[TMP34]] to i32
1757 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
1758 // CHECK2-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
1759 // CHECK2-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
1760 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
1761 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1762 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 4
1763 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 4
1764 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
1765 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1766 // CHECK2:       cond.true:
1767 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 4
1768 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1769 // CHECK2:       cond.false:
1770 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 4
1771 // CHECK2-NEXT:    br label [[COND_END]]
1772 // CHECK2:       cond.end:
1773 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
1774 // CHECK2-NEXT:    store i32 [[COND]], i32* [[TMP29]], align 4
1775 // CHECK2-NEXT:    ret void
1776 //
1777 //
1778 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
1779 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6]] align 2 {
1780 // CHECK2-NEXT:  entry:
1781 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1782 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
1783 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1784 // CHECK2-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
1785 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1786 // CHECK2-NEXT:    ret %struct.S.0* [[THIS1]]
1787 //
1788 //
1789 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
1790 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
1791 // CHECK2-NEXT:  entry:
1792 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1793 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1794 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1795 // CHECK2-NEXT:    ret i32 0
1796 //
1797 //
1798 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1799 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1800 // CHECK2-NEXT:  entry:
1801 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1802 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1803 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1804 // CHECK2-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1805 // CHECK2-NEXT:    ret void
1806 //
1807 //
1808 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1809 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1810 // CHECK2-NEXT:  entry:
1811 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1812 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1813 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1814 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1815 // CHECK2-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
1816 // CHECK2-NEXT:    [[CONV:%.*]] = fptosi double [[TMP0]] to i32
1817 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[F]], align 4
1818 // CHECK2-NEXT:    ret void
1819 //
1820 //
1821 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1822 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1823 // CHECK2-NEXT:  entry:
1824 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1825 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1826 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1827 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1828 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1829 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1830 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1831 // CHECK2-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to double
1832 // CHECK2-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
1833 // CHECK2-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
1834 // CHECK2-NEXT:    [[CONV2:%.*]] = fptosi double [[ADD]] to i32
1835 // CHECK2-NEXT:    store i32 [[CONV2]], i32* [[F]], align 4
1836 // CHECK2-NEXT:    ret void
1837 //
1838 //
1839 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1840 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1841 // CHECK2-NEXT:  entry:
1842 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1843 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1844 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1845 // CHECK2-NEXT:    ret void
1846 //
1847 //
1848 // CHECK3-LABEL: define {{[^@]+}}@main
1849 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
1850 // CHECK3-NEXT:  entry:
1851 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1852 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
1853 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1854 // CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
1855 // CHECK3-NEXT:    ret i32 0
1856 //
1857 //
1858 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
1859 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
1860 // CHECK3-NEXT:  entry:
1861 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1862 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1863 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
1864 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
1865 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
1866 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
1867 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
1868 // CHECK3-NEXT:    [[G:%.*]] = alloca double, align 8
1869 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1870 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1871 // CHECK3-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca double, align 8
1872 // CHECK3-NEXT:    [[TMP:%.*]] = alloca double, align 8
1873 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1874 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1875 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1876 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1877 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
1878 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
1879 // CHECK3-NEXT:    store double 0.000000e+00, double* [[G]], align 8
1880 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1881 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1882 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
1883 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1884 // CHECK3-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
1885 // CHECK3-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
1886 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
1887 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1888 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1889 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1890 // CHECK3:       omp.inner.for.cond:
1891 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1892 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1893 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1894 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1895 // CHECK3:       omp.inner.for.body:
1896 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1897 // CHECK3-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
1898 // CHECK3-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
1899 // CHECK3-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
1900 // CHECK3-NEXT:    ]
1901 // CHECK3:       .omp.sections.case:
1902 // CHECK3-NEXT:    store double 1.000000e+00, double* [[G]], align 8
1903 // CHECK3-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1904 // CHECK3:       .omp.sections.case1:
1905 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1906 // CHECK3-NEXT:    store double* [[G]], double** [[TMP9]], align 8
1907 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(8) [[REF_TMP]])
1908 // CHECK3-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1909 // CHECK3:       .omp.sections.exit:
1910 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1911 // CHECK3:       omp.inner.for.inc:
1912 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1913 // CHECK3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP10]], 1
1914 // CHECK3-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1915 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
1916 // CHECK3:       omp.inner.for.end:
1917 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1918 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1919 // CHECK3-NEXT:    [[TMP12:%.*]] = bitcast double* [[G]] to i8*
1920 // CHECK3-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
1921 // CHECK3-NEXT:    [[TMP13:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1922 // CHECK3-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 1, i64 8, i8* [[TMP13]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1923 // CHECK3-NEXT:    switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1924 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1925 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1926 // CHECK3-NEXT:    ]
1927 // CHECK3:       .omp.reduction.case1:
1928 // CHECK3-NEXT:    [[TMP15:%.*]] = load double, double* @g, align 8
1929 // CHECK3-NEXT:    [[TMP16:%.*]] = load double, double* [[G]], align 8
1930 // CHECK3-NEXT:    [[ADD:%.*]] = fadd double [[TMP15]], [[TMP16]]
1931 // CHECK3-NEXT:    store double [[ADD]], double* @g, align 8
1932 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1933 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1934 // CHECK3:       .omp.reduction.case2:
1935 // CHECK3-NEXT:    [[TMP17:%.*]] = load double, double* [[G]], align 8
1936 // CHECK3-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* bitcast (double* @g to i64*) monotonic, align 8
1937 // CHECK3-NEXT:    br label [[ATOMIC_CONT:%.*]]
1938 // CHECK3:       atomic_cont:
1939 // CHECK3-NEXT:    [[TMP18:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP25:%.*]], [[ATOMIC_CONT]] ]
1940 // CHECK3-NEXT:    [[TMP19:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64*
1941 // CHECK3-NEXT:    [[TMP20:%.*]] = bitcast i64 [[TMP18]] to double
1942 // CHECK3-NEXT:    store double [[TMP20]], double* [[TMP]], align 8
1943 // CHECK3-NEXT:    [[TMP21:%.*]] = load double, double* [[TMP]], align 8
1944 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[G]], align 8
1945 // CHECK3-NEXT:    [[ADD2:%.*]] = fadd double [[TMP21]], [[TMP22]]
1946 // CHECK3-NEXT:    store double [[ADD2]], double* [[ATOMIC_TEMP]], align 8
1947 // CHECK3-NEXT:    [[TMP23:%.*]] = load i64, i64* [[TMP19]], align 8
1948 // CHECK3-NEXT:    [[TMP24:%.*]] = cmpxchg i64* bitcast (double* @g to i64*), i64 [[TMP18]], i64 [[TMP23]] monotonic monotonic, align 8
1949 // CHECK3-NEXT:    [[TMP25]] = extractvalue { i64, i1 } [[TMP24]], 0
1950 // CHECK3-NEXT:    [[TMP26:%.*]] = extractvalue { i64, i1 } [[TMP24]], 1
1951 // CHECK3-NEXT:    br i1 [[TMP26]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
1952 // CHECK3:       atomic_exit:
1953 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1954 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1955 // CHECK3:       .omp.reduction.default:
1956 // CHECK3-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP1]])
1957 // CHECK3-NEXT:    ret void
1958 //
1959 //
1960 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
1961 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
1962 // CHECK3-NEXT:  entry:
1963 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1964 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1965 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1966 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1967 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1968 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1969 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1970 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1971 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1972 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1973 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
1974 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1975 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1976 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
1977 // CHECK3-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
1978 // CHECK3-NEXT:    [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
1979 // CHECK3-NEXT:    [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
1980 // CHECK3-NEXT:    store double [[ADD]], double* [[TMP11]], align 8
1981 // CHECK3-NEXT:    ret void
1982 //
1983 //
1984 // CHECK4-LABEL: define {{[^@]+}}@main
1985 // CHECK4-SAME: () #[[ATTR1:[0-9]+]] {
1986 // CHECK4-NEXT:  entry:
1987 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1988 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1989 // CHECK4-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
1990 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
1991 // CHECK4-NEXT:    call void [[TMP1]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
1992 // CHECK4-NEXT:    ret i32 0
1993 //
1994 //
1995 // CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
1996 // CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
1997 // CHECK4-NEXT:  entry:
1998 // CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1999 // CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
2000 // CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2001 // CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
2002 // CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
2003 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2004 // CHECK4-NEXT:    ret void
2005 //
2006 //
2007 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
2008 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
2009 // CHECK4-NEXT:  entry:
2010 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2011 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2012 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
2013 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
2014 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
2015 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
2016 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
2017 // CHECK4-NEXT:    [[G:%.*]] = alloca double, align 8
2018 // CHECK4-NEXT:    [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, align 8
2019 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2020 // CHECK4-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca double, align 8
2021 // CHECK4-NEXT:    [[TMP:%.*]] = alloca double, align 8
2022 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2023 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2024 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
2025 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
2026 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
2027 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
2028 // CHECK4-NEXT:    store double 0.000000e+00, double* [[G]], align 8
2029 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2030 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2031 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
2032 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
2033 // CHECK4-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
2034 // CHECK4-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
2035 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
2036 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
2037 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
2038 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2039 // CHECK4:       omp.inner.for.cond:
2040 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
2041 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
2042 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2043 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2044 // CHECK4:       omp.inner.for.body:
2045 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
2046 // CHECK4-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
2047 // CHECK4-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
2048 // CHECK4-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
2049 // CHECK4-NEXT:    ]
2050 // CHECK4:       .omp.sections.case:
2051 // CHECK4-NEXT:    store double 1.000000e+00, double* [[G]], align 8
2052 // CHECK4-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
2053 // CHECK4:       .omp.sections.case1:
2054 // CHECK4-NEXT:    [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 0
2055 // CHECK4-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
2056 // CHECK4-NEXT:    [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 1
2057 // CHECK4-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
2058 // CHECK4-NEXT:    [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 2
2059 // CHECK4-NEXT:    store i32 0, i32* [[BLOCK_RESERVED]], align 4
2060 // CHECK4-NEXT:    [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 3
2061 // CHECK4-NEXT:    store i8* bitcast (void (i8*)* @_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
2062 // CHECK4-NEXT:    [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 4
2063 // CHECK4-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
2064 // CHECK4-NEXT:    [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
2065 // CHECK4-NEXT:    [[TMP9:%.*]] = load volatile double, double* [[G]], align 8
2066 // CHECK4-NEXT:    store volatile double [[TMP9]], double* [[BLOCK_CAPTURED]], align 8
2067 // CHECK4-NEXT:    [[TMP10:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]] to void ()*
2068 // CHECK4-NEXT:    [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP10]] to %struct.__block_literal_generic*
2069 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
2070 // CHECK4-NEXT:    [[TMP12:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
2071 // CHECK4-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP11]], align 8
2072 // CHECK4-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to void (i8*)*
2073 // CHECK4-NEXT:    call void [[TMP14]](i8* [[TMP12]])
2074 // CHECK4-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
2075 // CHECK4:       .omp.sections.exit:
2076 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2077 // CHECK4:       omp.inner.for.inc:
2078 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
2079 // CHECK4-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP15]], 1
2080 // CHECK4-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
2081 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
2082 // CHECK4:       omp.inner.for.end:
2083 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2084 // CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2085 // CHECK4-NEXT:    [[TMP17:%.*]] = bitcast double* [[G]] to i8*
2086 // CHECK4-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
2087 // CHECK4-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2088 // CHECK4-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
2089 // CHECK4-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2090 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2091 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2092 // CHECK4-NEXT:    ]
2093 // CHECK4:       .omp.reduction.case1:
2094 // CHECK4-NEXT:    [[TMP20:%.*]] = load double, double* @g, align 8
2095 // CHECK4-NEXT:    [[TMP21:%.*]] = load double, double* [[G]], align 8
2096 // CHECK4-NEXT:    [[ADD:%.*]] = fadd double [[TMP20]], [[TMP21]]
2097 // CHECK4-NEXT:    store double [[ADD]], double* @g, align 8
2098 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2099 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2100 // CHECK4:       .omp.reduction.case2:
2101 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[G]], align 8
2102 // CHECK4-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* bitcast (double* @g to i64*) monotonic, align 8
2103 // CHECK4-NEXT:    br label [[ATOMIC_CONT:%.*]]
2104 // CHECK4:       atomic_cont:
2105 // CHECK4-NEXT:    [[TMP23:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP30:%.*]], [[ATOMIC_CONT]] ]
2106 // CHECK4-NEXT:    [[TMP24:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64*
2107 // CHECK4-NEXT:    [[TMP25:%.*]] = bitcast i64 [[TMP23]] to double
2108 // CHECK4-NEXT:    store double [[TMP25]], double* [[TMP]], align 8
2109 // CHECK4-NEXT:    [[TMP26:%.*]] = load double, double* [[TMP]], align 8
2110 // CHECK4-NEXT:    [[TMP27:%.*]] = load double, double* [[G]], align 8
2111 // CHECK4-NEXT:    [[ADD2:%.*]] = fadd double [[TMP26]], [[TMP27]]
2112 // CHECK4-NEXT:    store double [[ADD2]], double* [[ATOMIC_TEMP]], align 8
2113 // CHECK4-NEXT:    [[TMP28:%.*]] = load i64, i64* [[TMP24]], align 8
2114 // CHECK4-NEXT:    [[TMP29:%.*]] = cmpxchg i64* bitcast (double* @g to i64*), i64 [[TMP23]], i64 [[TMP28]] monotonic monotonic, align 8
2115 // CHECK4-NEXT:    [[TMP30]] = extractvalue { i64, i1 } [[TMP29]], 0
2116 // CHECK4-NEXT:    [[TMP31:%.*]] = extractvalue { i64, i1 } [[TMP29]], 1
2117 // CHECK4-NEXT:    br i1 [[TMP31]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
2118 // CHECK4:       atomic_exit:
2119 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2120 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2121 // CHECK4:       .omp.reduction.default:
2122 // CHECK4-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP1]])
2123 // CHECK4-NEXT:    ret void
2124 //
2125 //
2126 // CHECK4-LABEL: define {{[^@]+}}@_block_invoke
2127 // CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
2128 // CHECK4-NEXT:  entry:
2129 // CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2130 // CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*, align 8
2131 // CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2132 // CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*
2133 // CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>** [[BLOCK_ADDR]], align 8
2134 // CHECK4-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
2135 // CHECK4-NEXT:    store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
2136 // CHECK4-NEXT:    ret void
2137 //
2138 //
2139 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
2140 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
2141 // CHECK4-NEXT:  entry:
2142 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2143 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2144 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2145 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2146 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2147 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2148 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2149 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2150 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2151 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2152 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
2153 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2154 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2155 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
2156 // CHECK4-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
2157 // CHECK4-NEXT:    [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
2158 // CHECK4-NEXT:    [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
2159 // CHECK4-NEXT:    store double [[ADD]], double* [[TMP11]], align 8
2160 // CHECK4-NEXT:    ret void
2161 //
2162 //