// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -DCHECK -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -DCHECK -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++  -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6

// RUN: %clang_cc1 -DCHECK -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
// RUN: %clang_cc1 -DCHECK -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10

// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++  -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <typename T>
T tmain() {
  T t_var = T();
  T vec[] = {1, 2};
#pragma omp target teams distribute parallel for simd reduction(+: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var += (T) i;
  }
  return T();
}

int main() {
  static int sivar;
#ifdef LAMBDA

  [&]() {
#pragma omp target teams distribute parallel for simd reduction(+: sivar)
  for (int i = 0; i < 2; ++i) {

    // Skip global and bound tid vars



    // Skip global and bound tid vars, and prev lb and ub vars
    // skip loop vars


    sivar += i;

    [&]() {

      sivar += 4;

    }();
  }
  }();
  return 0;
#else
#pragma omp target teams distribute parallel for simd reduction(+: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar += i;
  }
  return tmain<int>();
#endif
}




// Skip global and bound tid vars


// Skip global and bound tid vars, and prev lb and ub
// skip loop vars




// Skip global and bound tid vars


// Skip global and bound tid vars, and prev lb and ub vars
// skip loop vars

#endif
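// A brief summary of the autogenerated assertions below (descriptive only, the
// checks themselves are authoritative): for each offloading region above, the
// teams and parallel outlined functions are expected to create a
// zero-initialized private copy of the reduction variable (sivar / t_var),
// accumulate into it inside the distributed simd loop, and then combine it
// into the original variable either under __kmpc_reduce_nowait (case 1) or via
// an atomicrmw add (case 2).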
93 // CHECK1-LABEL: define {{[^@]+}}@main
94 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
95 // CHECK1-NEXT:  entry:
96 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
97 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
98 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
99 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
100 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
101 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
102 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
103 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
104 // CHECK1-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP1]], align 8
105 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
106 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
107 // CHECK1-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP3]], align 8
108 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
109 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
110 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
111 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
112 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 2)
113 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
114 // CHECK1-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
115 // CHECK1-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
116 // CHECK1:       omp_offload.failed:
117 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66(i32* @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]]
118 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
119 // CHECK1:       omp_offload.cont:
120 // CHECK1-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
121 // CHECK1-NEXT:    ret i32 [[CALL]]
122 //
123 //
124 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66
125 // CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
126 // CHECK1-NEXT:  entry:
127 // CHECK1-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
128 // CHECK1-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
129 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
130 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
131 // CHECK1-NEXT:    ret void
132 //
133 //
134 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
135 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
136 // CHECK1-NEXT:  entry:
137 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
138 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
139 // CHECK1-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
140 // CHECK1-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
141 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
142 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
143 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
144 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
145 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
146 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
147 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
148 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
149 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
150 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
151 // CHECK1-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
152 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
153 // CHECK1-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
154 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
155 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
156 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
157 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
158 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
159 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
160 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
161 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
162 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
163 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
164 // CHECK1:       cond.true:
165 // CHECK1-NEXT:    br label [[COND_END:%.*]]
166 // CHECK1:       cond.false:
167 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
168 // CHECK1-NEXT:    br label [[COND_END]]
169 // CHECK1:       cond.end:
170 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
171 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
172 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
173 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
174 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
175 // CHECK1:       omp.inner.for.cond:
176 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
177 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
178 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
179 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
180 // CHECK1:       omp.inner.for.body:
181 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
182 // CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
183 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
184 // CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
185 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i32* [[SIVAR1]])
186 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
187 // CHECK1:       omp.inner.for.inc:
188 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
189 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
190 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
191 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
192 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
193 // CHECK1:       omp.inner.for.end:
194 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
195 // CHECK1:       omp.loop.exit:
196 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
197 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
198 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
199 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
200 // CHECK1:       .omp.final.then:
201 // CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
202 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
203 // CHECK1:       .omp.final.done:
204 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
205 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR1]] to i8*
206 // CHECK1-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
207 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
208 // CHECK1-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
209 // CHECK1-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
210 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
211 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
212 // CHECK1-NEXT:    ]
213 // CHECK1:       .omp.reduction.case1:
214 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
215 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR1]], align 4
216 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
217 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
218 // CHECK1-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
219 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
220 // CHECK1:       .omp.reduction.case2:
221 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR1]], align 4
222 // CHECK1-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
223 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
224 // CHECK1:       .omp.reduction.default:
225 // CHECK1-NEXT:    ret void
226 //
227 //
228 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
229 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
230 // CHECK1-NEXT:  entry:
231 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
232 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
233 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
234 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
235 // CHECK1-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
236 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
237 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
238 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
239 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
240 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
241 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
242 // CHECK1-NEXT:    [[SIVAR2:%.*]] = alloca i32, align 4
243 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
244 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
245 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
246 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
247 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
248 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
249 // CHECK1-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
250 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
251 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
252 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
253 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
254 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
255 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
256 // CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
257 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
258 // CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
259 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
260 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
261 // CHECK1-NEXT:    store i32 0, i32* [[SIVAR2]], align 4
262 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
263 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
264 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
265 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
266 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
267 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
268 // CHECK1:       cond.true:
269 // CHECK1-NEXT:    br label [[COND_END:%.*]]
270 // CHECK1:       cond.false:
271 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
272 // CHECK1-NEXT:    br label [[COND_END]]
273 // CHECK1:       cond.end:
274 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
275 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
276 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
277 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
278 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
279 // CHECK1:       omp.inner.for.cond:
280 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
281 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
282 // CHECK1-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
283 // CHECK1-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
284 // CHECK1:       omp.inner.for.body:
285 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
286 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
287 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
288 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
289 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
290 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[SIVAR2]], align 4
291 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
292 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[SIVAR2]], align 4
293 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
294 // CHECK1:       omp.body.continue:
295 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
296 // CHECK1:       omp.inner.for.inc:
297 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
298 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1
299 // CHECK1-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
300 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
301 // CHECK1:       omp.inner.for.end:
302 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
303 // CHECK1:       omp.loop.exit:
304 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
305 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
306 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
307 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
308 // CHECK1:       .omp.final.then:
309 // CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
310 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
311 // CHECK1:       .omp.final.done:
312 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
313 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR2]] to i8*
314 // CHECK1-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
315 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
316 // CHECK1-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
317 // CHECK1-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
318 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
319 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
320 // CHECK1-NEXT:    ]
321 // CHECK1:       .omp.reduction.case1:
322 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
323 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR2]], align 4
324 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
325 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[TMP0]], align 4
326 // CHECK1-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
327 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
328 // CHECK1:       .omp.reduction.case2:
329 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR2]], align 4
330 // CHECK1-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
331 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
332 // CHECK1:       .omp.reduction.default:
333 // CHECK1-NEXT:    ret void
334 //
335 //
336 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
337 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
338 // CHECK1-NEXT:  entry:
339 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
340 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
341 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
342 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
343 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
344 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
345 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
346 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
347 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
348 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
349 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
350 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
351 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
352 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
353 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
354 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
355 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
356 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
357 // CHECK1-NEXT:    ret void
358 //
359 //
360 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
361 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
362 // CHECK1-NEXT:  entry:
363 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
364 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
365 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
366 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
367 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
368 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
369 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
370 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
371 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
372 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
373 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
374 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
375 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
376 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
377 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
378 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
379 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
380 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
381 // CHECK1-NEXT:    ret void
382 //
383 //
384 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
385 // CHECK1-SAME: () #[[ATTR5:[0-9]+]] comdat {
386 // CHECK1-NEXT:  entry:
387 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
388 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
389 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
390 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
391 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
392 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
393 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
394 // CHECK1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
395 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
396 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
397 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8** [[TMP1]] to i32**
398 // CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[TMP2]], align 8
399 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
400 // CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
401 // CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[TMP4]], align 8
402 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
403 // CHECK1-NEXT:    store i8* null, i8** [[TMP5]], align 8
404 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
405 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
406 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 2)
407 // CHECK1-NEXT:    [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
408 // CHECK1-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
409 // CHECK1-NEXT:    br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
410 // CHECK1:       omp_offload.failed:
411 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32* [[T_VAR]]) #[[ATTR2]]
412 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
413 // CHECK1:       omp_offload.cont:
414 // CHECK1-NEXT:    ret i32 0
415 //
416 //
417 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
418 // CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
419 // CHECK1-NEXT:  entry:
420 // CHECK1-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
421 // CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
422 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
423 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[TMP0]])
424 // CHECK1-NEXT:    ret void
425 //
426 //
427 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
428 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
429 // CHECK1-NEXT:  entry:
430 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
431 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
432 // CHECK1-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
433 // CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
434 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
435 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
436 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
437 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
438 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
439 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
440 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
441 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
442 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
443 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
444 // CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
445 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
446 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
447 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
448 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
449 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
450 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
451 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
452 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
453 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
454 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
455 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
456 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
457 // CHECK1:       cond.true:
458 // CHECK1-NEXT:    br label [[COND_END:%.*]]
459 // CHECK1:       cond.false:
460 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
461 // CHECK1-NEXT:    br label [[COND_END]]
462 // CHECK1:       cond.end:
463 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
464 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
465 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
466 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
467 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
468 // CHECK1:       omp.inner.for.cond:
469 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
470 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
471 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
472 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
473 // CHECK1:       omp.inner.for.body:
474 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
475 // CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
476 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
477 // CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
478 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i32* [[T_VAR1]])
479 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
480 // CHECK1:       omp.inner.for.inc:
481 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
482 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
483 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
484 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
485 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
486 // CHECK1:       omp.inner.for.end:
487 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
488 // CHECK1:       omp.loop.exit:
489 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
490 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
491 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
492 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
493 // CHECK1:       .omp.final.then:
494 // CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
495 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
496 // CHECK1:       .omp.final.done:
497 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
498 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i32* [[T_VAR1]] to i8*
499 // CHECK1-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
500 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
501 // CHECK1-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
502 // CHECK1-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
503 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
504 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
505 // CHECK1-NEXT:    ]
506 // CHECK1:       .omp.reduction.case1:
507 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
508 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[T_VAR1]], align 4
509 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
510 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
511 // CHECK1-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
512 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
513 // CHECK1:       .omp.reduction.case2:
514 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR1]], align 4
515 // CHECK1-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
516 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
517 // CHECK1:       .omp.reduction.default:
518 // CHECK1-NEXT:    ret void
519 //
520 //
521 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
522 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
523 // CHECK1-NEXT:  entry:
524 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
525 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
526 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
527 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
528 // CHECK1-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
529 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
530 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
531 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
532 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
533 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
534 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
535 // CHECK1-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
536 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
537 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
538 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
539 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
540 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
541 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
542 // CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
543 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
544 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
545 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
546 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
547 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
548 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
549 // CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
550 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
551 // CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
552 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
553 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
554 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR2]], align 4
555 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
556 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
557 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
558 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
559 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
560 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
561 // CHECK1:       cond.true:
562 // CHECK1-NEXT:    br label [[COND_END:%.*]]
563 // CHECK1:       cond.false:
564 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
565 // CHECK1-NEXT:    br label [[COND_END]]
566 // CHECK1:       cond.end:
567 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
568 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
569 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
570 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
571 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
572 // CHECK1:       omp.inner.for.cond:
573 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
574 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
575 // CHECK1-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
576 // CHECK1-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
577 // CHECK1:       omp.inner.for.body:
578 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
579 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
580 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
581 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
582 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
583 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[T_VAR2]], align 4
584 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
585 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[T_VAR2]], align 4
586 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
587 // CHECK1:       omp.body.continue:
588 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
589 // CHECK1:       omp.inner.for.inc:
590 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
591 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1
592 // CHECK1-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
593 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
594 // CHECK1:       omp.inner.for.end:
595 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
596 // CHECK1:       omp.loop.exit:
597 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
598 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
599 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
600 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
601 // CHECK1:       .omp.final.then:
602 // CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
603 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
604 // CHECK1:       .omp.final.done:
605 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
606 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i32* [[T_VAR2]] to i8*
607 // CHECK1-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
608 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
609 // CHECK1-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
610 // CHECK1-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
611 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
612 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
613 // CHECK1-NEXT:    ]
614 // CHECK1:       .omp.reduction.case1:
615 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
616 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[T_VAR2]], align 4
617 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
618 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[TMP0]], align 4
619 // CHECK1-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
620 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
621 // CHECK1:       .omp.reduction.case2:
622 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR2]], align 4
623 // CHECK1-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
624 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
625 // CHECK1:       .omp.reduction.default:
626 // CHECK1-NEXT:    ret void
627 //
628 //
629 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
630 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
631 // CHECK1-NEXT:  entry:
632 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
633 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
634 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
635 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
636 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
637 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
638 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
639 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
640 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
641 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
642 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
643 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
644 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
645 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
646 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
647 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
648 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
649 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
650 // CHECK1-NEXT:    ret void
651 //
652 //
653 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
654 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
655 // CHECK1-NEXT:  entry:
656 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
657 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
658 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
659 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
660 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
661 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
662 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
663 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
664 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
665 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
666 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
667 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
668 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
669 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
670 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
671 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
672 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
673 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
674 // CHECK1-NEXT:    ret void
675 //
676 //
677 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
678 // CHECK1-SAME: () #[[ATTR7:[0-9]+]] {
679 // CHECK1-NEXT:  entry:
680 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
681 // CHECK1-NEXT:    ret void
682 //
683 //
684 // CHECK2-LABEL: define {{[^@]+}}@main
685 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
686 // CHECK2-NEXT:  entry:
687 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
688 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
689 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
690 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
691 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
692 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
693 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
694 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
695 // CHECK2-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP1]], align 8
696 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
697 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
698 // CHECK2-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP3]], align 8
699 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
700 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
701 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
702 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
703 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 2)
704 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
705 // CHECK2-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
706 // CHECK2-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
707 // CHECK2:       omp_offload.failed:
708 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66(i32* @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]]
709 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
710 // CHECK2:       omp_offload.cont:
711 // CHECK2-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
712 // CHECK2-NEXT:    ret i32 [[CALL]]
713 //
714 //
715 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66
716 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
717 // CHECK2-NEXT:  entry:
718 // CHECK2-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
719 // CHECK2-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
720 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
721 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
722 // CHECK2-NEXT:    ret void
723 //
724 //
725 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
726 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
727 // CHECK2-NEXT:  entry:
728 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
729 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
730 // CHECK2-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
731 // CHECK2-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
732 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
733 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
734 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
735 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
736 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
737 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
738 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
739 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
740 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
741 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
742 // CHECK2-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
743 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
744 // CHECK2-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
745 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
746 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
747 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
748 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
749 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
750 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
751 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
752 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
753 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
754 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
755 // CHECK2:       cond.true:
756 // CHECK2-NEXT:    br label [[COND_END:%.*]]
757 // CHECK2:       cond.false:
758 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
759 // CHECK2-NEXT:    br label [[COND_END]]
760 // CHECK2:       cond.end:
761 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
762 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
763 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
764 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
765 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
766 // CHECK2:       omp.inner.for.cond:
767 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
768 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
769 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
770 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
771 // CHECK2:       omp.inner.for.body:
772 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
773 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
774 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
775 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
776 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i32* [[SIVAR1]])
777 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
778 // CHECK2:       omp.inner.for.inc:
779 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
780 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
781 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
782 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
783 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
784 // CHECK2:       omp.inner.for.end:
785 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
786 // CHECK2:       omp.loop.exit:
787 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
788 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
789 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
790 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
791 // CHECK2:       .omp.final.then:
792 // CHECK2-NEXT:    store i32 2, i32* [[I]], align 4
793 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
794 // CHECK2:       .omp.final.done:
795 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
796 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR1]] to i8*
797 // CHECK2-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
798 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
799 // CHECK2-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
800 // CHECK2-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
801 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
802 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
803 // CHECK2-NEXT:    ]
804 // CHECK2:       .omp.reduction.case1:
805 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
806 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR1]], align 4
807 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
808 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
809 // CHECK2-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
810 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
811 // CHECK2:       .omp.reduction.case2:
812 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR1]], align 4
813 // CHECK2-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
814 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
815 // CHECK2:       .omp.reduction.default:
816 // CHECK2-NEXT:    ret void
817 //
818 //
819 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
820 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
821 // CHECK2-NEXT:  entry:
822 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
823 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
824 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
825 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
826 // CHECK2-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
827 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
828 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
829 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
830 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
831 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
832 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
833 // CHECK2-NEXT:    [[SIVAR2:%.*]] = alloca i32, align 4
834 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
835 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
836 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
837 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
838 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
839 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
840 // CHECK2-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
841 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
842 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
843 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
844 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
845 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
846 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
847 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
848 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
849 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
850 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
851 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
852 // CHECK2-NEXT:    store i32 0, i32* [[SIVAR2]], align 4
853 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
854 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
855 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
856 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
857 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
858 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
859 // CHECK2:       cond.true:
860 // CHECK2-NEXT:    br label [[COND_END:%.*]]
861 // CHECK2:       cond.false:
862 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
863 // CHECK2-NEXT:    br label [[COND_END]]
864 // CHECK2:       cond.end:
865 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
866 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
867 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
868 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
869 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
870 // CHECK2:       omp.inner.for.cond:
871 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
872 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
873 // CHECK2-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
874 // CHECK2-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
875 // CHECK2:       omp.inner.for.body:
876 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
877 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
878 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
879 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
880 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
881 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[SIVAR2]], align 4
882 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
883 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[SIVAR2]], align 4
884 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
885 // CHECK2:       omp.body.continue:
886 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
887 // CHECK2:       omp.inner.for.inc:
888 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
889 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1
890 // CHECK2-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
891 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
892 // CHECK2:       omp.inner.for.end:
893 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
894 // CHECK2:       omp.loop.exit:
895 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
896 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
897 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
898 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
899 // CHECK2:       .omp.final.then:
900 // CHECK2-NEXT:    store i32 2, i32* [[I]], align 4
901 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
902 // CHECK2:       .omp.final.done:
903 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
904 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR2]] to i8*
905 // CHECK2-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
906 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
907 // CHECK2-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
908 // CHECK2-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
909 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
910 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
911 // CHECK2-NEXT:    ]
912 // CHECK2:       .omp.reduction.case1:
913 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
914 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR2]], align 4
915 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
916 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[TMP0]], align 4
917 // CHECK2-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
918 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
919 // CHECK2:       .omp.reduction.case2:
920 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR2]], align 4
921 // CHECK2-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
922 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
923 // CHECK2:       .omp.reduction.default:
924 // CHECK2-NEXT:    ret void
925 //
926 //
927 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
928 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
929 // CHECK2-NEXT:  entry:
930 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
931 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
932 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
933 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
934 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
935 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
936 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
937 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
938 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
939 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
940 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
941 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
942 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
943 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
944 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
945 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
946 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
947 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
948 // CHECK2-NEXT:    ret void
949 //
950 //
951 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
952 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
953 // CHECK2-NEXT:  entry:
954 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
955 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
956 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
957 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
958 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
959 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
960 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
961 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
962 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
963 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
964 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
965 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
966 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
967 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
968 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
969 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
970 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
971 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
972 // CHECK2-NEXT:    ret void
973 //
974 //
975 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
976 // CHECK2-SAME: () #[[ATTR5:[0-9]+]] comdat {
977 // CHECK2-NEXT:  entry:
978 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
979 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
980 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
981 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
982 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
983 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
984 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR]], align 4
985 // CHECK2-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
986 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
987 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
988 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8** [[TMP1]] to i32**
989 // CHECK2-NEXT:    store i32* [[T_VAR]], i32** [[TMP2]], align 8
990 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
991 // CHECK2-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
992 // CHECK2-NEXT:    store i32* [[T_VAR]], i32** [[TMP4]], align 8
993 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
994 // CHECK2-NEXT:    store i8* null, i8** [[TMP5]], align 8
995 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
996 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
997 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 2)
998 // CHECK2-NEXT:    [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
999 // CHECK2-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
1000 // CHECK2-NEXT:    br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1001 // CHECK2:       omp_offload.failed:
1002 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32* [[T_VAR]]) #[[ATTR2]]
1003 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1004 // CHECK2:       omp_offload.cont:
1005 // CHECK2-NEXT:    ret i32 0
1006 //
1007 //
1008 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
1009 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
1010 // CHECK2-NEXT:  entry:
1011 // CHECK2-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
1012 // CHECK2-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
1013 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
1014 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[TMP0]])
1015 // CHECK2-NEXT:    ret void
1016 //
1017 //
1018 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
1019 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
1020 // CHECK2-NEXT:  entry:
1021 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1022 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1023 // CHECK2-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
1024 // CHECK2-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
1025 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1026 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1027 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1028 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1029 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1030 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1031 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1032 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1033 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1034 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1035 // CHECK2-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
1036 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
1037 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
1038 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1039 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
1040 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1041 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1042 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1043 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1044 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1045 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1046 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
1047 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1048 // CHECK2:       cond.true:
1049 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1050 // CHECK2:       cond.false:
1051 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1052 // CHECK2-NEXT:    br label [[COND_END]]
1053 // CHECK2:       cond.end:
1054 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1055 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1056 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1057 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1058 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1059 // CHECK2:       omp.inner.for.cond:
1060 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1061 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1062 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1063 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1064 // CHECK2:       omp.inner.for.body:
1065 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1066 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1067 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1068 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1069 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i32* [[T_VAR1]])
1070 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1071 // CHECK2:       omp.inner.for.inc:
1072 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1073 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1074 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1075 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
1076 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
1077 // CHECK2:       omp.inner.for.end:
1078 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1079 // CHECK2:       omp.loop.exit:
1080 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1081 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1082 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1083 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1084 // CHECK2:       .omp.final.then:
1085 // CHECK2-NEXT:    store i32 2, i32* [[I]], align 4
1086 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1087 // CHECK2:       .omp.final.done:
1088 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1089 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i32* [[T_VAR1]] to i8*
1090 // CHECK2-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
1091 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1092 // CHECK2-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
1093 // CHECK2-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1094 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1095 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1096 // CHECK2-NEXT:    ]
1097 // CHECK2:       .omp.reduction.case1:
1098 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
1099 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[T_VAR1]], align 4
1100 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
1101 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
1102 // CHECK2-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1103 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1104 // CHECK2:       .omp.reduction.case2:
1105 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR1]], align 4
1106 // CHECK2-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
1107 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1108 // CHECK2:       .omp.reduction.default:
1109 // CHECK2-NEXT:    ret void
1110 //
1111 //
1112 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
1113 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
1114 // CHECK2-NEXT:  entry:
1115 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1116 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1117 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1118 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1119 // CHECK2-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
1120 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1121 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1122 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1123 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1124 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1125 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1126 // CHECK2-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
1127 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1128 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1129 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1130 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1131 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1132 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1133 // CHECK2-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
1134 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
1135 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1136 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1137 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1138 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1139 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1140 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1141 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1142 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
1143 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1144 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1145 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR2]], align 4
1146 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1147 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
1148 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1149 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1150 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
1151 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1152 // CHECK2:       cond.true:
1153 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1154 // CHECK2:       cond.false:
1155 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1156 // CHECK2-NEXT:    br label [[COND_END]]
1157 // CHECK2:       cond.end:
1158 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1159 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1160 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1161 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
1162 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1163 // CHECK2:       omp.inner.for.cond:
1164 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1165 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1166 // CHECK2-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1167 // CHECK2-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1168 // CHECK2:       omp.inner.for.body:
1169 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1170 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1171 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1172 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1173 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1174 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[T_VAR2]], align 4
1175 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
1176 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[T_VAR2]], align 4
1177 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1178 // CHECK2:       omp.body.continue:
1179 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1180 // CHECK2:       omp.inner.for.inc:
1181 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1182 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1
1183 // CHECK2-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
1184 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
1185 // CHECK2:       omp.inner.for.end:
1186 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1187 // CHECK2:       omp.loop.exit:
1188 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1189 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1190 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1191 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1192 // CHECK2:       .omp.final.then:
1193 // CHECK2-NEXT:    store i32 2, i32* [[I]], align 4
1194 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1195 // CHECK2:       .omp.final.done:
1196 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1197 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i32* [[T_VAR2]] to i8*
1198 // CHECK2-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
1199 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1200 // CHECK2-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
1201 // CHECK2-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1202 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1203 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1204 // CHECK2-NEXT:    ]
1205 // CHECK2:       .omp.reduction.case1:
1206 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
1207 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[T_VAR2]], align 4
1208 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
1209 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[TMP0]], align 4
1210 // CHECK2-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1211 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1212 // CHECK2:       .omp.reduction.case2:
1213 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR2]], align 4
1214 // CHECK2-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
1215 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1216 // CHECK2:       .omp.reduction.default:
1217 // CHECK2-NEXT:    ret void
1218 //
1219 //
1220 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
1221 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1222 // CHECK2-NEXT:  entry:
1223 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1224 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1225 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1226 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1227 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1228 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1229 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1230 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1231 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1232 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1233 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1234 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1235 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1236 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1237 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1238 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1239 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1240 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1241 // CHECK2-NEXT:    ret void
1242 //
1243 //
1244 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
1245 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1246 // CHECK2-NEXT:  entry:
1247 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1248 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1249 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1250 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1251 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1252 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1253 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1254 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1255 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1256 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1257 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1258 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1259 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1260 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1261 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1262 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1263 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1264 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1265 // CHECK2-NEXT:    ret void
1266 //
1267 //
1268 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1269 // CHECK2-SAME: () #[[ATTR7:[0-9]+]] {
1270 // CHECK2-NEXT:  entry:
1271 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
1272 // CHECK2-NEXT:    ret void
1273 //
1274 //
1275 // CHECK3-LABEL: define {{[^@]+}}@main
1276 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
1277 // CHECK3-NEXT:  entry:
1278 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1279 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
1280 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
1281 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
1282 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1283 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1284 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1285 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1286 // CHECK3-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP1]], align 4
1287 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1288 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1289 // CHECK3-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP3]], align 4
1290 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
1291 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
1292 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1293 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1294 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 2)
1295 // CHECK3-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1296 // CHECK3-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1297 // CHECK3-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1298 // CHECK3:       omp_offload.failed:
1299 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66(i32* @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]]
1300 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1301 // CHECK3:       omp_offload.cont:
1302 // CHECK3-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1303 // CHECK3-NEXT:    ret i32 [[CALL]]
1304 //
1305 //
1306 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66
1307 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
1308 // CHECK3-NEXT:  entry:
1309 // CHECK3-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
1310 // CHECK3-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
1311 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
1312 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
1313 // CHECK3-NEXT:    ret void
1314 //
1315 //
1316 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
1317 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
1318 // CHECK3-NEXT:  entry:
1319 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1320 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1321 // CHECK3-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
1322 // CHECK3-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
1323 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1324 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1325 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1326 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1327 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1328 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1329 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
1330 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
1331 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1332 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1333 // CHECK3-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
1334 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
1335 // CHECK3-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
1336 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1337 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
1338 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1339 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1340 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1341 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1342 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1343 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1344 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
1345 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1346 // CHECK3:       cond.true:
1347 // CHECK3-NEXT:    br label [[COND_END:%.*]]
1348 // CHECK3:       cond.false:
1349 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1350 // CHECK3-NEXT:    br label [[COND_END]]
1351 // CHECK3:       cond.end:
1352 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1353 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1354 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1355 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1356 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1357 // CHECK3:       omp.inner.for.cond:
1358 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1359 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1360 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1361 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1362 // CHECK3:       omp.inner.for.body:
1363 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1364 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1365 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], i32* [[SIVAR1]])
1366 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1367 // CHECK3:       omp.inner.for.inc:
1368 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1369 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1370 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
1371 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
1372 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
1373 // CHECK3:       omp.inner.for.end:
1374 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1375 // CHECK3:       omp.loop.exit:
1376 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1377 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1378 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
1379 // CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1380 // CHECK3:       .omp.final.then:
1381 // CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
1382 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1383 // CHECK3:       .omp.final.done:
1384 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1385 // CHECK3-NEXT:    [[TMP15:%.*]] = bitcast i32* [[SIVAR1]] to i8*
1386 // CHECK3-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 4
1387 // CHECK3-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1388 // CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
1389 // CHECK3-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1390 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1391 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1392 // CHECK3-NEXT:    ]
1393 // CHECK3:       .omp.reduction.case1:
1394 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
1395 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[SIVAR1]], align 4
1396 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
1397 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
1398 // CHECK3-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1399 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1400 // CHECK3:       .omp.reduction.case2:
1401 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[SIVAR1]], align 4
1402 // CHECK3-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
1403 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1404 // CHECK3:       .omp.reduction.default:
1405 // CHECK3-NEXT:    ret void
1406 //
1407 //
1408 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
1409 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
1410 // CHECK3-NEXT:  entry:
1411 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1412 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1413 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
1414 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
1415 // CHECK3-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
1416 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1417 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1418 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1419 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1420 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1421 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1422 // CHECK3-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
1423 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
1424 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
1425 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1426 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1427 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1428 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1429 // CHECK3-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
1430 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
1431 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1432 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1433 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1434 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1435 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
1436 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
1437 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1438 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1439 // CHECK3-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
1440 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1441 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
1442 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1443 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1444 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
1445 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1446 // CHECK3:       cond.true:
1447 // CHECK3-NEXT:    br label [[COND_END:%.*]]
1448 // CHECK3:       cond.false:
1449 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1450 // CHECK3-NEXT:    br label [[COND_END]]
1451 // CHECK3:       cond.end:
1452 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1453 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1454 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1455 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
1456 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1457 // CHECK3:       omp.inner.for.cond:
1458 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1459 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1460 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1461 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1462 // CHECK3:       omp.inner.for.body:
1463 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1464 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1465 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1466 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1467 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1468 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[SIVAR1]], align 4
1469 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
1470 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[SIVAR1]], align 4
1471 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1472 // CHECK3:       omp.body.continue:
1473 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1474 // CHECK3:       omp.inner.for.inc:
1475 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1476 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
1477 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
1478 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
1479 // CHECK3:       omp.inner.for.end:
1480 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1481 // CHECK3:       omp.loop.exit:
1482 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1483 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1484 // CHECK3-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1485 // CHECK3-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1486 // CHECK3:       .omp.final.then:
1487 // CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
1488 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1489 // CHECK3:       .omp.final.done:
1490 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1491 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR1]] to i8*
1492 // CHECK3-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 4
1493 // CHECK3-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1494 // CHECK3-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1495 // CHECK3-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1496 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1497 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1498 // CHECK3-NEXT:    ]
1499 // CHECK3:       .omp.reduction.case1:
1500 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
1501 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR1]], align 4
1502 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
1503 // CHECK3-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
1504 // CHECK3-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1505 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1506 // CHECK3:       .omp.reduction.case2:
1507 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR1]], align 4
1508 // CHECK3-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
1509 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1510 // CHECK3:       .omp.reduction.default:
1511 // CHECK3-NEXT:    ret void
1512 //
1513 //
1514 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
1515 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
1516 // CHECK3-NEXT:  entry:
1517 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
1518 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
1519 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
1520 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
1521 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
1522 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1523 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
1524 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1525 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
1526 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
1527 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1528 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
1529 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
1530 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1531 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1532 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1533 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1534 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1535 // CHECK3-NEXT:    ret void
1536 //
1537 //
1538 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
1539 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1540 // CHECK3-NEXT:  entry:
1541 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
1542 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
1543 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
1544 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
1545 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
1546 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1547 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
1548 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1549 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
1550 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
1551 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1552 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
1553 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
1554 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1555 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1556 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1557 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1558 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1559 // CHECK3-NEXT:    ret void
1560 //
1561 //
1562 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1563 // CHECK3-SAME: () #[[ATTR5:[0-9]+]] comdat {
1564 // CHECK3-NEXT:  entry:
1565 // CHECK3-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1566 // CHECK3-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1567 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
1568 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
1569 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
1570 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1571 // CHECK3-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1572 // CHECK3-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1573 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
1574 // CHECK3-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1575 // CHECK3-NEXT:    [[TMP2:%.*]] = bitcast i8** [[TMP1]] to i32**
1576 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[TMP2]], align 4
1577 // CHECK3-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1578 // CHECK3-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
1579 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[TMP4]], align 4
1580 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
1581 // CHECK3-NEXT:    store i8* null, i8** [[TMP5]], align 4
1582 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1583 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1584 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 2)
1585 // CHECK3-NEXT:    [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1586 // CHECK3-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
1587 // CHECK3-NEXT:    br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1588 // CHECK3:       omp_offload.failed:
1589 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32* [[T_VAR]]) #[[ATTR2]]
1590 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1591 // CHECK3:       omp_offload.cont:
1592 // CHECK3-NEXT:    ret i32 0
1593 //
1594 //
1595 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
1596 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
1597 // CHECK3-NEXT:  entry:
1598 // CHECK3-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
1599 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
1600 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
1601 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[TMP0]])
1602 // CHECK3-NEXT:    ret void
1603 //
1604 //
1605 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
1606 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
1607 // CHECK3-NEXT:  entry:
1608 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1609 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1610 // CHECK3-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
1611 // CHECK3-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
1612 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1613 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1614 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1615 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1616 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1617 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1618 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
1619 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
1620 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1621 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1622 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
1623 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
1624 // CHECK3-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
1625 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1626 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
1627 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1628 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1629 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1630 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1631 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1632 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1633 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
1634 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1635 // CHECK3:       cond.true:
1636 // CHECK3-NEXT:    br label [[COND_END:%.*]]
1637 // CHECK3:       cond.false:
1638 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1639 // CHECK3-NEXT:    br label [[COND_END]]
1640 // CHECK3:       cond.end:
1641 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1642 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1643 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1644 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1645 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1646 // CHECK3:       omp.inner.for.cond:
1647 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1648 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1649 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1650 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1651 // CHECK3:       omp.inner.for.body:
1652 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1653 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1654 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], i32* [[T_VAR1]])
1655 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1656 // CHECK3:       omp.inner.for.inc:
1657 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1658 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1659 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
1660 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
1661 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
1662 // CHECK3:       omp.inner.for.end:
1663 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1664 // CHECK3:       omp.loop.exit:
1665 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1666 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1667 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
1668 // CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1669 // CHECK3:       .omp.final.then:
1670 // CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
1671 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1672 // CHECK3:       .omp.final.done:
1673 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1674 // CHECK3-NEXT:    [[TMP15:%.*]] = bitcast i32* [[T_VAR1]] to i8*
1675 // CHECK3-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 4
1676 // CHECK3-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1677 // CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 4, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
1678 // CHECK3-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1679 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1680 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1681 // CHECK3-NEXT:    ]
1682 // CHECK3:       .omp.reduction.case1:
1683 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
1684 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[T_VAR1]], align 4
1685 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
1686 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
1687 // CHECK3-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1688 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1689 // CHECK3:       .omp.reduction.case2:
1690 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[T_VAR1]], align 4
1691 // CHECK3-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
1692 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1693 // CHECK3:       .omp.reduction.default:
1694 // CHECK3-NEXT:    ret void
1695 //
1696 //
1697 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
1698 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
1699 // CHECK3-NEXT:  entry:
1700 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1701 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1702 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
1703 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
1704 // CHECK3-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
1705 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1706 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1707 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1708 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1709 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1710 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1711 // CHECK3-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
1712 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
1713 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
1714 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1715 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1716 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1717 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1718 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
1719 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
1720 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1721 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1722 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1723 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1724 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
1725 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
1726 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1727 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1728 // CHECK3-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
1729 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1730 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
1731 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1732 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1733 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
1734 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1735 // CHECK3:       cond.true:
1736 // CHECK3-NEXT:    br label [[COND_END:%.*]]
1737 // CHECK3:       cond.false:
1738 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1739 // CHECK3-NEXT:    br label [[COND_END]]
1740 // CHECK3:       cond.end:
1741 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1742 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1743 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1744 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
1745 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1746 // CHECK3:       omp.inner.for.cond:
1747 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1748 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1749 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1750 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1751 // CHECK3:       omp.inner.for.body:
1752 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1753 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1754 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1755 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1756 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1757 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[T_VAR1]], align 4
1758 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
1759 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[T_VAR1]], align 4
1760 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1761 // CHECK3:       omp.body.continue:
1762 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1763 // CHECK3:       omp.inner.for.inc:
1764 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1765 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
1766 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
1767 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
1768 // CHECK3:       omp.inner.for.end:
1769 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1770 // CHECK3:       omp.loop.exit:
1771 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1772 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1773 // CHECK3-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1774 // CHECK3-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1775 // CHECK3:       .omp.final.then:
1776 // CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
1777 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1778 // CHECK3:       .omp.final.done:
1779 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1780 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i32* [[T_VAR1]] to i8*
1781 // CHECK3-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 4
1782 // CHECK3-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1783 // CHECK3-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
1784 // CHECK3-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1785 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1786 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1787 // CHECK3-NEXT:    ]
1788 // CHECK3:       .omp.reduction.case1:
1789 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
1790 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[T_VAR1]], align 4
1791 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
1792 // CHECK3-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
1793 // CHECK3-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1794 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1795 // CHECK3:       .omp.reduction.case2:
1796 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR1]], align 4
1797 // CHECK3-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
1798 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1799 // CHECK3:       .omp.reduction.default:
1800 // CHECK3-NEXT:    ret void
1801 //
1802 //
1803 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
1804 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1805 // CHECK3-NEXT:  entry:
1806 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
1807 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
1808 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
1809 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
1810 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
1811 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1812 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
1813 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1814 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
1815 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
1816 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1817 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
1818 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
1819 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1820 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1821 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1822 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1823 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1824 // CHECK3-NEXT:    ret void
1825 //
1826 //
1827 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
1828 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1829 // CHECK3-NEXT:  entry:
1830 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
1831 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
1832 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
1833 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
1834 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
1835 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1836 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
1837 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1838 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
1839 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
1840 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1841 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
1842 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
1843 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1844 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1845 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1846 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1847 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1848 // CHECK3-NEXT:    ret void
1849 //
1850 //
1851 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1852 // CHECK3-SAME: () #[[ATTR7:[0-9]+]] {
1853 // CHECK3-NEXT:  entry:
1854 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
1855 // CHECK3-NEXT:    ret void
1856 //
1857 //
1858 // CHECK4-LABEL: define {{[^@]+}}@main
1859 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
1860 // CHECK4-NEXT:  entry:
1861 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1862 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
1863 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
1864 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
1865 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1866 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1867 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1868 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1869 // CHECK4-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP1]], align 4
1870 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1871 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1872 // CHECK4-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP3]], align 4
1873 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
1874 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
1875 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1876 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1877 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 2)
1878 // CHECK4-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1879 // CHECK4-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1880 // CHECK4-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1881 // CHECK4:       omp_offload.failed:
1882 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66(i32* @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]]
1883 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1884 // CHECK4:       omp_offload.cont:
1885 // CHECK4-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1886 // CHECK4-NEXT:    ret i32 [[CALL]]
1887 //
1888 //
1889 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66
1890 // CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
1891 // CHECK4-NEXT:  entry:
1892 // CHECK4-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
1893 // CHECK4-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
1894 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
1895 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
1896 // CHECK4-NEXT:    ret void
1897 //
1898 //
1899 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
1900 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
1901 // CHECK4-NEXT:  entry:
1902 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1903 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1904 // CHECK4-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
1905 // CHECK4-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
1906 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1907 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1908 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1909 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1910 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1911 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1912 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
1913 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
1914 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1915 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1916 // CHECK4-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
1917 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
1918 // CHECK4-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
1919 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1920 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
1921 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1922 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1923 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1924 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1925 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1926 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1927 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
1928 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1929 // CHECK4:       cond.true:
1930 // CHECK4-NEXT:    br label [[COND_END:%.*]]
1931 // CHECK4:       cond.false:
1932 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1933 // CHECK4-NEXT:    br label [[COND_END]]
1934 // CHECK4:       cond.end:
1935 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1936 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1937 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1938 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1939 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1940 // CHECK4:       omp.inner.for.cond:
1941 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1942 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1943 // CHECK4-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1944 // CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1945 // CHECK4:       omp.inner.for.body:
1946 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1947 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1948 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], i32* [[SIVAR1]])
1949 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1950 // CHECK4:       omp.inner.for.inc:
1951 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1952 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1953 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
1954 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
1955 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
1956 // CHECK4:       omp.inner.for.end:
1957 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1958 // CHECK4:       omp.loop.exit:
1959 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1960 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1961 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
1962 // CHECK4-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1963 // CHECK4:       .omp.final.then:
1964 // CHECK4-NEXT:    store i32 2, i32* [[I]], align 4
1965 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1966 // CHECK4:       .omp.final.done:
1967 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1968 // CHECK4-NEXT:    [[TMP15:%.*]] = bitcast i32* [[SIVAR1]] to i8*
1969 // CHECK4-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 4
1970 // CHECK4-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1971 // CHECK4-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
1972 // CHECK4-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1973 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1974 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1975 // CHECK4-NEXT:    ]
1976 // CHECK4:       .omp.reduction.case1:
1977 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
1978 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[SIVAR1]], align 4
1979 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
1980 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
1981 // CHECK4-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1982 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1983 // CHECK4:       .omp.reduction.case2:
1984 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[SIVAR1]], align 4
1985 // CHECK4-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
1986 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1987 // CHECK4:       .omp.reduction.default:
1988 // CHECK4-NEXT:    ret void
1989 //
1990 //
1991 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
1992 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
1993 // CHECK4-NEXT:  entry:
1994 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1995 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1996 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
1997 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
1998 // CHECK4-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
1999 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2000 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2001 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2002 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2003 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2004 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2005 // CHECK4-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
2006 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
2007 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
2008 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2009 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2010 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2011 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2012 // CHECK4-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
2013 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
2014 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2015 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2016 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2017 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2018 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2019 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2020 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2021 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2022 // CHECK4-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
2023 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2024 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2025 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2026 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2027 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
2028 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2029 // CHECK4:       cond.true:
2030 // CHECK4-NEXT:    br label [[COND_END:%.*]]
2031 // CHECK4:       cond.false:
2032 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2033 // CHECK4-NEXT:    br label [[COND_END]]
2034 // CHECK4:       cond.end:
2035 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2036 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2037 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2038 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
2039 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2040 // CHECK4:       omp.inner.for.cond:
2041 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2042 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2043 // CHECK4-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2044 // CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2045 // CHECK4:       omp.inner.for.body:
2046 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2047 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2048 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2049 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2050 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
2051 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[SIVAR1]], align 4
2052 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
2053 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[SIVAR1]], align 4
2054 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2055 // CHECK4:       omp.body.continue:
2056 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2057 // CHECK4:       omp.inner.for.inc:
2058 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2059 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
2060 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
2061 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
2062 // CHECK4:       omp.inner.for.end:
2063 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2064 // CHECK4:       omp.loop.exit:
2065 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2066 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2067 // CHECK4-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2068 // CHECK4-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2069 // CHECK4:       .omp.final.then:
2070 // CHECK4-NEXT:    store i32 2, i32* [[I]], align 4
2071 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2072 // CHECK4:       .omp.final.done:
2073 // CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
2074 // CHECK4-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR1]] to i8*
2075 // CHECK4-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 4
2076 // CHECK4-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2077 // CHECK4-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
2078 // CHECK4-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2079 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2080 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2081 // CHECK4-NEXT:    ]
2082 // CHECK4:       .omp.reduction.case1:
2083 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
2084 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR1]], align 4
2085 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2086 // CHECK4-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
2087 // CHECK4-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2088 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2089 // CHECK4:       .omp.reduction.case2:
2090 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR1]], align 4
2091 // CHECK4-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
2092 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2093 // CHECK4:       .omp.reduction.default:
2094 // CHECK4-NEXT:    ret void
2095 //
2096 //
2097 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
2098 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
2099 // CHECK4-NEXT:  entry:
2100 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
2101 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
2102 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2103 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
2104 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2105 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2106 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
2107 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2108 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
2109 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2110 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2111 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
2112 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
2113 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2114 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2115 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2116 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2117 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2118 // CHECK4-NEXT:    ret void
2119 //
2120 //
2121 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
2122 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
2123 // CHECK4-NEXT:  entry:
2124 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
2125 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
2126 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2127 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
2128 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2129 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2130 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
2131 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2132 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
2133 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2134 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2135 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
2136 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
2137 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2138 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2139 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2140 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2141 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2142 // CHECK4-NEXT:    ret void
2143 //
2144 //
2145 // CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
2146 // CHECK4-SAME: () #[[ATTR5:[0-9]+]] comdat {
2147 // CHECK4-NEXT:  entry:
2148 // CHECK4-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
2149 // CHECK4-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
2150 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
2151 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
2152 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
2153 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2154 // CHECK4-NEXT:    store i32 0, i32* [[T_VAR]], align 4
2155 // CHECK4-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
2156 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
2157 // CHECK4-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2158 // CHECK4-NEXT:    [[TMP2:%.*]] = bitcast i8** [[TMP1]] to i32**
2159 // CHECK4-NEXT:    store i32* [[T_VAR]], i32** [[TMP2]], align 4
2160 // CHECK4-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2161 // CHECK4-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
2162 // CHECK4-NEXT:    store i32* [[T_VAR]], i32** [[TMP4]], align 4
2163 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2164 // CHECK4-NEXT:    store i8* null, i8** [[TMP5]], align 4
2165 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2166 // CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2167 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 2)
2168 // CHECK4-NEXT:    [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2169 // CHECK4-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
2170 // CHECK4-NEXT:    br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2171 // CHECK4:       omp_offload.failed:
2172 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32* [[T_VAR]]) #[[ATTR2]]
2173 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2174 // CHECK4:       omp_offload.cont:
2175 // CHECK4-NEXT:    ret i32 0
2176 //
2177 //
2178 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
2179 // CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
2180 // CHECK4-NEXT:  entry:
2181 // CHECK4-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
2182 // CHECK4-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
2183 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
2184 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[TMP0]])
2185 // CHECK4-NEXT:    ret void
2186 //
2187 //
2188 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
2189 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
2190 // CHECK4-NEXT:  entry:
2191 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2192 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2193 // CHECK4-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
2194 // CHECK4-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
2195 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2196 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2197 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2198 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2199 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2200 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2201 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
2202 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
2203 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2204 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2205 // CHECK4-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
2206 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
2207 // CHECK4-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
2208 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2209 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
2210 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2211 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2212 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2213 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2214 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2215 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2216 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
2217 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2218 // CHECK4:       cond.true:
2219 // CHECK4-NEXT:    br label [[COND_END:%.*]]
2220 // CHECK4:       cond.false:
2221 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2222 // CHECK4-NEXT:    br label [[COND_END]]
2223 // CHECK4:       cond.end:
2224 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2225 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2226 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2227 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2228 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2229 // CHECK4:       omp.inner.for.cond:
2230 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2231 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2232 // CHECK4-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2233 // CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2234 // CHECK4:       omp.inner.for.body:
2235 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2236 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2237 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], i32* [[T_VAR1]])
2238 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2239 // CHECK4:       omp.inner.for.inc:
2240 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2241 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2242 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2243 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2244 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
2245 // CHECK4:       omp.inner.for.end:
2246 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2247 // CHECK4:       omp.loop.exit:
2248 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2249 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2250 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2251 // CHECK4-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2252 // CHECK4:       .omp.final.then:
2253 // CHECK4-NEXT:    store i32 2, i32* [[I]], align 4
2254 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2255 // CHECK4:       .omp.final.done:
2256 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
2257 // CHECK4-NEXT:    [[TMP15:%.*]] = bitcast i32* [[T_VAR1]] to i8*
2258 // CHECK4-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 4
2259 // CHECK4-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2260 // CHECK4-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 4, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
2261 // CHECK4-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2262 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2263 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2264 // CHECK4-NEXT:    ]
2265 // CHECK4:       .omp.reduction.case1:
2266 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
2267 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[T_VAR1]], align 4
2268 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
2269 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
2270 // CHECK4-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2271 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2272 // CHECK4:       .omp.reduction.case2:
2273 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[T_VAR1]], align 4
2274 // CHECK4-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
2275 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2276 // CHECK4:       .omp.reduction.default:
2277 // CHECK4-NEXT:    ret void
2278 //
2279 //
2280 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
2281 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
2282 // CHECK4-NEXT:  entry:
2283 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2284 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2285 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2286 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2287 // CHECK4-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
2288 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2289 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2290 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2291 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2292 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2293 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2294 // CHECK4-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
2295 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
2296 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
2297 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2298 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2299 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2300 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2301 // CHECK4-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
2302 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
2303 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2304 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2305 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2306 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2307 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2308 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2309 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2310 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2311 // CHECK4-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
2312 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2313 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2314 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2315 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2316 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
2317 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2318 // CHECK4:       cond.true:
2319 // CHECK4-NEXT:    br label [[COND_END:%.*]]
2320 // CHECK4:       cond.false:
2321 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2322 // CHECK4-NEXT:    br label [[COND_END]]
2323 // CHECK4:       cond.end:
2324 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2325 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2326 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2327 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
2328 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2329 // CHECK4:       omp.inner.for.cond:
2330 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2331 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2332 // CHECK4-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2333 // CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2334 // CHECK4:       omp.inner.for.body:
2335 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2336 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2337 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2338 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2339 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
2340 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[T_VAR1]], align 4
2341 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
2342 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[T_VAR1]], align 4
2343 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2344 // CHECK4:       omp.body.continue:
2345 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2346 // CHECK4:       omp.inner.for.inc:
2347 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2348 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
2349 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
2350 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
2351 // CHECK4:       omp.inner.for.end:
2352 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2353 // CHECK4:       omp.loop.exit:
2354 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2355 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2356 // CHECK4-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2357 // CHECK4-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2358 // CHECK4:       .omp.final.then:
2359 // CHECK4-NEXT:    store i32 2, i32* [[I]], align 4
2360 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2361 // CHECK4:       .omp.final.done:
2362 // CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
2363 // CHECK4-NEXT:    [[TMP17:%.*]] = bitcast i32* [[T_VAR1]] to i8*
2364 // CHECK4-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 4
2365 // CHECK4-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2366 // CHECK4-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
2367 // CHECK4-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2368 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2369 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2370 // CHECK4-NEXT:    ]
2371 // CHECK4:       .omp.reduction.case1:
2372 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
2373 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[T_VAR1]], align 4
2374 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2375 // CHECK4-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
2376 // CHECK4-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2377 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2378 // CHECK4:       .omp.reduction.case2:
2379 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR1]], align 4
2380 // CHECK4-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
2381 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2382 // CHECK4:       .omp.reduction.default:
2383 // CHECK4-NEXT:    ret void
2384 //
2385 //
2386 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
2387 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
2388 // CHECK4-NEXT:  entry:
2389 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
2390 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
2391 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2392 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
2393 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2394 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2395 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
2396 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2397 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
2398 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2399 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2400 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
2401 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
2402 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2403 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2404 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2405 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2406 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2407 // CHECK4-NEXT:    ret void
2408 //
2409 //
2410 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
2411 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
2412 // CHECK4-NEXT:  entry:
2413 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
2414 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
2415 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2416 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
2417 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2418 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2419 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
2420 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2421 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
2422 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2423 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2424 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
2425 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
2426 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2427 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2428 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2429 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2430 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2431 // CHECK4-NEXT:    ret void
2432 //
2433 //
2434 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2435 // CHECK4-SAME: () #[[ATTR7:[0-9]+]] {
2436 // CHECK4-NEXT:  entry:
2437 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
2438 // CHECK4-NEXT:    ret void
2439 //
2440 //
2441 // CHECK5-LABEL: define {{[^@]+}}@main
2442 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
2443 // CHECK5-NEXT:  entry:
2444 // CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2445 // CHECK5-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
2446 // CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2447 // CHECK5-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
2448 // CHECK5-NEXT:    ret i32 0
2449 //
2450 //
2451 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44
2452 // CHECK5-SAME: (i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
2453 // CHECK5-NEXT:  entry:
2454 // CHECK5-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
2455 // CHECK5-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
2456 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
2457 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
2458 // CHECK5-NEXT:    ret void
2459 //
2460 //
2461 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
2462 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
2463 // CHECK5-NEXT:  entry:
2464 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2465 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2466 // CHECK5-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
2467 // CHECK5-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
2468 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2469 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2470 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2471 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2472 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2473 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2474 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
2475 // CHECK5-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2476 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2477 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2478 // CHECK5-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
2479 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
2480 // CHECK5-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
2481 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2482 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
2483 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2484 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2485 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2486 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2487 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2488 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2489 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
2490 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2491 // CHECK5:       cond.true:
2492 // CHECK5-NEXT:    br label [[COND_END:%.*]]
2493 // CHECK5:       cond.false:
2494 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2495 // CHECK5-NEXT:    br label [[COND_END]]
2496 // CHECK5:       cond.end:
2497 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2498 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2499 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2500 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2501 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2502 // CHECK5:       omp.inner.for.cond:
2503 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2504 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2505 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2506 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2507 // CHECK5:       omp.inner.for.body:
2508 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2509 // CHECK5-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2510 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2511 // CHECK5-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2512 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i32* [[SIVAR1]])
2513 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2514 // CHECK5:       omp.inner.for.inc:
2515 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2516 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2517 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2518 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2519 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
2520 // CHECK5:       omp.inner.for.end:
2521 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2522 // CHECK5:       omp.loop.exit:
2523 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2524 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2525 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2526 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2527 // CHECK5:       .omp.final.then:
2528 // CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
2529 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2530 // CHECK5:       .omp.final.done:
2531 // CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2532 // CHECK5-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR1]] to i8*
2533 // CHECK5-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
2534 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2535 // CHECK5-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
2536 // CHECK5-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2537 // CHECK5-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2538 // CHECK5-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2539 // CHECK5-NEXT:    ]
2540 // CHECK5:       .omp.reduction.case1:
2541 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
2542 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR1]], align 4
2543 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2544 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
2545 // CHECK5-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2546 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2547 // CHECK5:       .omp.reduction.case2:
2548 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR1]], align 4
2549 // CHECK5-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
2550 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2551 // CHECK5:       .omp.reduction.default:
2552 // CHECK5-NEXT:    ret void
2553 //
2554 //
2555 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
2556 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
2557 // CHECK5-NEXT:  entry:
2558 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2559 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2560 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2561 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2562 // CHECK5-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
2563 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2564 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2565 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2566 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2567 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2568 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2569 // CHECK5-NEXT:    [[SIVAR2:%.*]] = alloca i32, align 4
2570 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
2571 // CHECK5-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
2572 // CHECK5-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2573 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2574 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2575 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2576 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2577 // CHECK5-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
2578 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
2579 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2580 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2581 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2582 // CHECK5-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2583 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2584 // CHECK5-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2585 // CHECK5-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2586 // CHECK5-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
2587 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2588 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2589 // CHECK5-NEXT:    store i32 0, i32* [[SIVAR2]], align 4
2590 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2591 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2592 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2593 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2594 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
2595 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2596 // CHECK5:       cond.true:
2597 // CHECK5-NEXT:    br label [[COND_END:%.*]]
2598 // CHECK5:       cond.false:
2599 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2600 // CHECK5-NEXT:    br label [[COND_END]]
2601 // CHECK5:       cond.end:
2602 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2603 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2604 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2605 // CHECK5-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
2606 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2607 // CHECK5:       omp.inner.for.cond:
2608 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2609 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2610 // CHECK5-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2611 // CHECK5-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2612 // CHECK5:       omp.inner.for.body:
2613 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2614 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2615 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2616 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2617 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
2618 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[SIVAR2]], align 4
2619 // CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
2620 // CHECK5-NEXT:    store i32 [[ADD4]], i32* [[SIVAR2]], align 4
2621 // CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2622 // CHECK5-NEXT:    store i32* [[SIVAR2]], i32** [[TMP13]], align 8
2623 // CHECK5-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(8) [[REF_TMP]])
2624 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2625 // CHECK5:       omp.body.continue:
2626 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2627 // CHECK5:       omp.inner.for.inc:
2628 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2629 // CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1
2630 // CHECK5-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
2631 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
2632 // CHECK5:       omp.inner.for.end:
2633 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2634 // CHECK5:       omp.loop.exit:
2635 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2636 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2637 // CHECK5-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
2638 // CHECK5-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2639 // CHECK5:       .omp.final.then:
2640 // CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
2641 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2642 // CHECK5:       .omp.final.done:
2643 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2644 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i32* [[SIVAR2]] to i8*
2645 // CHECK5-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
2646 // CHECK5-NEXT:    [[TMP19:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2647 // CHECK5-NEXT:    [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
2648 // CHECK5-NEXT:    switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2649 // CHECK5-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2650 // CHECK5-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2651 // CHECK5-NEXT:    ]
2652 // CHECK5:       .omp.reduction.case1:
2653 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP0]], align 4
2654 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR2]], align 4
2655 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2656 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[TMP0]], align 4
2657 // CHECK5-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2658 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2659 // CHECK5:       .omp.reduction.case2:
2660 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[SIVAR2]], align 4
2661 // CHECK5-NEXT:    [[TMP24:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP23]] monotonic, align 4
2662 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2663 // CHECK5:       .omp.reduction.default:
2664 // CHECK5-NEXT:    ret void
2665 //
2666 //
2667 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
2668 // CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
2669 // CHECK5-NEXT:  entry:
2670 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2671 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2672 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2673 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2674 // CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2675 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2676 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2677 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2678 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2679 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2680 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2681 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2682 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2683 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2684 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2685 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2686 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2687 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2688 // CHECK5-NEXT:    ret void
2689 //
2690 //
2691 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
2692 // CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4]] {
2693 // CHECK5-NEXT:  entry:
2694 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2695 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2696 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2697 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2698 // CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2699 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2700 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2701 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2702 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2703 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2704 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2705 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2706 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2707 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2708 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2709 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2710 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2711 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2712 // CHECK5-NEXT:    ret void
2713 //
2714 //
2715 // CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2716 // CHECK5-SAME: () #[[ATTR6:[0-9]+]] {
2717 // CHECK5-NEXT:  entry:
2718 // CHECK5-NEXT:    call void @__tgt_register_requires(i64 1)
2719 // CHECK5-NEXT:    ret void
2720 //
2721 //
2722 // CHECK6-LABEL: define {{[^@]+}}@main
2723 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
2724 // CHECK6-NEXT:  entry:
2725 // CHECK6-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2726 // CHECK6-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
2727 // CHECK6-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2728 // CHECK6-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
2729 // CHECK6-NEXT:    ret i32 0
2730 //
2731 //
2732 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44
2733 // CHECK6-SAME: (i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
2734 // CHECK6-NEXT:  entry:
2735 // CHECK6-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
2736 // CHECK6-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
2737 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
2738 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
2739 // CHECK6-NEXT:    ret void
2740 //
2741 //
2742 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
2743 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
2744 // CHECK6-NEXT:  entry:
2745 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2746 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2747 // CHECK6-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
2748 // CHECK6-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
2749 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2750 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2751 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2752 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2753 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2754 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2755 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2756 // CHECK6-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2757 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2758 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2759 // CHECK6-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
2760 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
2761 // CHECK6-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
2762 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2763 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
2764 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2765 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2766 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2767 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2768 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2769 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2770 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
2771 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2772 // CHECK6:       cond.true:
2773 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2774 // CHECK6:       cond.false:
2775 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2776 // CHECK6-NEXT:    br label [[COND_END]]
2777 // CHECK6:       cond.end:
2778 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2779 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2780 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2781 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2782 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2783 // CHECK6:       omp.inner.for.cond:
2784 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2785 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2786 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2787 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2788 // CHECK6:       omp.inner.for.body:
2789 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2790 // CHECK6-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2791 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2792 // CHECK6-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2793 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i32* [[SIVAR1]])
2794 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2795 // CHECK6:       omp.inner.for.inc:
2796 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2797 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2798 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2799 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2800 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
2801 // CHECK6:       omp.inner.for.end:
2802 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2803 // CHECK6:       omp.loop.exit:
2804 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2805 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2806 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2807 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2808 // CHECK6:       .omp.final.then:
2809 // CHECK6-NEXT:    store i32 2, i32* [[I]], align 4
2810 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2811 // CHECK6:       .omp.final.done:
2812 // CHECK6-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2813 // CHECK6-NEXT:    [[TMP17:%.*]] = bitcast i32* [[SIVAR1]] to i8*
2814 // CHECK6-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
2815 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2816 // CHECK6-NEXT:    [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
2817 // CHECK6-NEXT:    switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2818 // CHECK6-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2819 // CHECK6-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2820 // CHECK6-NEXT:    ]
2821 // CHECK6:       .omp.reduction.case1:
2822 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP0]], align 4
2823 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR1]], align 4
2824 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2825 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[TMP0]], align 4
2826 // CHECK6-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2827 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2828 // CHECK6:       .omp.reduction.case2:
2829 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR1]], align 4
2830 // CHECK6-NEXT:    [[TMP23:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP22]] monotonic, align 4
2831 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2832 // CHECK6:       .omp.reduction.default:
2833 // CHECK6-NEXT:    ret void
2834 //
2835 //
2836 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..1
2837 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
2838 // CHECK6-NEXT:  entry:
2839 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2840 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2841 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2842 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2843 // CHECK6-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
2844 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2845 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2846 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2847 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2848 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2849 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2850 // CHECK6-NEXT:    [[SIVAR2:%.*]] = alloca i32, align 4
2851 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2852 // CHECK6-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
2853 // CHECK6-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2854 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2855 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2856 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2857 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2858 // CHECK6-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
2859 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
2860 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2861 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2862 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2863 // CHECK6-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2864 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2865 // CHECK6-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2866 // CHECK6-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2867 // CHECK6-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
2868 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2869 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2870 // CHECK6-NEXT:    store i32 0, i32* [[SIVAR2]], align 4
2871 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2872 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2873 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2874 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2875 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
2876 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2877 // CHECK6:       cond.true:
2878 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2879 // CHECK6:       cond.false:
2880 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2881 // CHECK6-NEXT:    br label [[COND_END]]
2882 // CHECK6:       cond.end:
2883 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2884 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2885 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2886 // CHECK6-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
2887 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2888 // CHECK6:       omp.inner.for.cond:
2889 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2890 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2891 // CHECK6-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2892 // CHECK6-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2893 // CHECK6:       omp.inner.for.body:
2894 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2895 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2896 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2897 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2898 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
2899 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[SIVAR2]], align 4
2900 // CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
2901 // CHECK6-NEXT:    store i32 [[ADD4]], i32* [[SIVAR2]], align 4
2902 // CHECK6-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2903 // CHECK6-NEXT:    store i32* [[SIVAR2]], i32** [[TMP13]], align 8
2904 // CHECK6-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(8) [[REF_TMP]])
2905 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2906 // CHECK6:       omp.body.continue:
2907 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2908 // CHECK6:       omp.inner.for.inc:
2909 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2910 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1
2911 // CHECK6-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
2912 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
2913 // CHECK6:       omp.inner.for.end:
2914 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2915 // CHECK6:       omp.loop.exit:
2916 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2917 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2918 // CHECK6-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
2919 // CHECK6-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2920 // CHECK6:       .omp.final.then:
2921 // CHECK6-NEXT:    store i32 2, i32* [[I]], align 4
2922 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2923 // CHECK6:       .omp.final.done:
2924 // CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2925 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i32* [[SIVAR2]] to i8*
2926 // CHECK6-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
2927 // CHECK6-NEXT:    [[TMP19:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2928 // CHECK6-NEXT:    [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
2929 // CHECK6-NEXT:    switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2930 // CHECK6-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2931 // CHECK6-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2932 // CHECK6-NEXT:    ]
2933 // CHECK6:       .omp.reduction.case1:
2934 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP0]], align 4
2935 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[SIVAR2]], align 4
2936 // CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2937 // CHECK6-NEXT:    store i32 [[ADD6]], i32* [[TMP0]], align 4
2938 // CHECK6-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2939 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2940 // CHECK6:       .omp.reduction.case2:
2941 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[SIVAR2]], align 4
2942 // CHECK6-NEXT:    [[TMP24:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP23]] monotonic, align 4
2943 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2944 // CHECK6:       .omp.reduction.default:
2945 // CHECK6-NEXT:    ret void
2946 //
2947 //
2948 // CHECK6-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
2949 // CHECK6-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
2950 // CHECK6-NEXT:  entry:
2951 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2952 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2953 // CHECK6-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2954 // CHECK6-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2955 // CHECK6-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2956 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2957 // CHECK6-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2958 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2959 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2960 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2961 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2962 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2963 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2964 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2965 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2966 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2967 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2968 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2969 // CHECK6-NEXT:    ret void
2970 //
2971 //
2972 // CHECK6-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
2973 // CHECK6-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4]] {
2974 // CHECK6-NEXT:  entry:
2975 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2976 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2977 // CHECK6-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2978 // CHECK6-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2979 // CHECK6-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2980 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2981 // CHECK6-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2982 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2983 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2984 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2985 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2986 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2987 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2988 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2989 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2990 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
2991 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2992 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2993 // CHECK6-NEXT:    ret void
2994 //
2995 //
2996 // CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2997 // CHECK6-SAME: () #[[ATTR6:[0-9]+]] {
2998 // CHECK6-NEXT:  entry:
2999 // CHECK6-NEXT:    call void @__tgt_register_requires(i64 1)
3000 // CHECK6-NEXT:    ret void
3001 //
3002 //
3003 // CHECK7-LABEL: define {{[^@]+}}@main
3004 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
3005 // CHECK7-NEXT:  entry:
3006 // CHECK7-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
3007 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3008 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3009 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3010 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3011 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3012 // CHECK7-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
3013 // CHECK7-NEXT:    store i32 0, i32* [[RETVAL]], align 4
3014 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3015 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
3016 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3017 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
3018 // CHECK7-NEXT:    store i32 0, i32* [[SIVAR]], align 4
3019 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3020 // CHECK7:       omp.inner.for.cond:
3021 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3022 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
3023 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
3024 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3025 // CHECK7:       omp.inner.for.body:
3026 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3027 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
3028 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3029 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
3030 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
3031 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !2
3032 // CHECK7-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
3033 // CHECK7-NEXT:    store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !2
3034 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3035 // CHECK7:       omp.body.continue:
3036 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3037 // CHECK7:       omp.inner.for.inc:
3038 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3039 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
3040 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3041 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
3042 // CHECK7:       omp.inner.for.end:
3043 // CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
3044 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
3045 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
3046 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
3047 // CHECK7-NEXT:    store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
3048 // CHECK7-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
3049 // CHECK7-NEXT:    ret i32 [[CALL]]
3050 //
3051 //
3052 // CHECK7-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
3053 // CHECK7-SAME: () #[[ATTR1:[0-9]+]] comdat {
3054 // CHECK7-NEXT:  entry:
3055 // CHECK7-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
3056 // CHECK7-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
3057 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3058 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3059 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3060 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3061 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3062 // CHECK7-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
3063 // CHECK7-NEXT:    store i32 0, i32* [[T_VAR]], align 4
3064 // CHECK7-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
3065 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
3066 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3067 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
3068 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3069 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
3070 // CHECK7-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
3071 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3072 // CHECK7:       omp.inner.for.cond:
3073 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
3074 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
3075 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
3076 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3077 // CHECK7:       omp.inner.for.body:
3078 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
3079 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
3080 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3081 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
3082 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
3083 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !6
3084 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
3085 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !6
3086 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3087 // CHECK7:       omp.body.continue:
3088 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3089 // CHECK7:       omp.inner.for.inc:
3090 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
3091 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
3092 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
3093 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
3094 // CHECK7:       omp.inner.for.end:
3095 // CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
3096 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
3097 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
3098 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
3099 // CHECK7-NEXT:    store i32 [[ADD4]], i32* [[T_VAR]], align 4
3100 // CHECK7-NEXT:    ret i32 0
3101 //
3102 //
3103 // CHECK8-LABEL: define {{[^@]+}}@main
3104 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
3105 // CHECK8-NEXT:  entry:
3106 // CHECK8-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
3107 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3108 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3109 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3110 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3111 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
3112 // CHECK8-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
3113 // CHECK8-NEXT:    store i32 0, i32* [[RETVAL]], align 4
3114 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3115 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
3116 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3117 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
3118 // CHECK8-NEXT:    store i32 0, i32* [[SIVAR]], align 4
3119 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3120 // CHECK8:       omp.inner.for.cond:
3121 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3122 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
3123 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
3124 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3125 // CHECK8:       omp.inner.for.body:
3126 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3127 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
3128 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3129 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
3130 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
3131 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !2
3132 // CHECK8-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
3133 // CHECK8-NEXT:    store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !2
3134 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3135 // CHECK8:       omp.body.continue:
3136 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3137 // CHECK8:       omp.inner.for.inc:
3138 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3139 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
3140 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
3141 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
3142 // CHECK8:       omp.inner.for.end:
3143 // CHECK8-NEXT:    store i32 2, i32* [[I]], align 4
3144 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
3145 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
3146 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
3147 // CHECK8-NEXT:    store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK8-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
// CHECK8-NEXT:    ret i32 [[CALL]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK8-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    store i32 0, i32* [[T_VAR]], align 4
// CHECK8-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
// CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8:       omp.inner.for.cond:
// CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8:       omp.inner.for.body:
// CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK8-NEXT:    store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8:       omp.body.continue:
// CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8:       omp.inner.for.inc:
// CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK8-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK8:       omp.inner.for.end:
// CHECK8-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK8-NEXT:    store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK8-NEXT:    ret i32 0
//
//
// CHECK9-LABEL: define {{[^@]+}}@main
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[SIVAR]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK9-NEXT:    store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK9-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK9-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK9-NEXT:    store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK9-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
// CHECK9-NEXT:    ret i32 [[CALL]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK9-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32 0, i32* [[T_VAR]], align 4
// CHECK9-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK9-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK9-NEXT:    store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK9-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT:    store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK9-NEXT:    ret i32 0
//
//
// CHECK10-LABEL: define {{[^@]+}}@main
// CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[SIVAR]], align 4
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10:       omp.inner.for.cond:
// CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10:       omp.inner.for.body:
// CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK10-NEXT:    store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10:       omp.body.continue:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10:       omp.inner.for.inc:
// CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK10-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK10:       omp.inner.for.end:
// CHECK10-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK10-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK10-NEXT:    store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK10-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
// CHECK10-NEXT:    ret i32 [[CALL]]
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK10-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    store i32 0, i32* [[T_VAR]], align 4
// CHECK10-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK10-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
// CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10:       omp.inner.for.cond:
// CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10:       omp.inner.for.body:
// CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK10-NEXT:    store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10:       omp.body.continue:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10:       omp.inner.for.inc:
// CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK10-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK10:       omp.inner.for.end:
// CHECK10-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK10-NEXT:    store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK10-NEXT:    ret i32 0
//
//
// CHECK11-LABEL: define {{[^@]+}}@main
// CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT:  entry:
// CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK11-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK11-NEXT:    ret i32 0
//
//
// CHECK12-LABEL: define {{[^@]+}}@main
// CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK12-NEXT:  entry:
// CHECK12-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK12-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK12-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK12-NEXT:    ret i32 0
//