// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// expected-no-diagnostics
3 #ifndef HEADER
4 #define HEADER

// Test host codegen.
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8

// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12

28 #ifdef CK1
29 
template <typename T, int X, long long Y> // T: element type; X: array length / loop trip count; Y: affects mangling only, never read
struct SS{
  T a[X];
  float b;
  int foo(void) { // five target regions that differ only in the schedule clause

    #pragma omp target teams distribute parallel for simd
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }
    #pragma omp target teams distribute parallel for simd schedule(static)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }
    #pragma omp target teams distribute parallel for simd schedule(static, X/2)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }

    #pragma omp target teams distribute parallel for simd schedule(dynamic)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }

    #pragma omp target teams distribute parallel for simd schedule(dynamic, X/2)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }

    // NOTE: the blank lines in this region are deliberate filler: the
    // autogenerated check lines encode the source line of each pragma above
    // (e.g. ..._l36, ..._l40), so the line numbering here must stay stable.












    return a[0];
  }
};
77 
teams_template_struct(void)78 int teams_template_struct(void) {
79   SS<int, 123, 456> V;
80   return V.foo();
81 
82 }
83 #endif // CK1

// Test host codegen.
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16

// RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK17
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK18
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK19
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK20

// RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK21
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK22
// RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK23
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK24
106 #ifdef CK2
107 
template <typename T, int n> // T: element type; n: trip count of every loop below
int tmain(T argc) { // argc is unused; it only pins the template argument T at the call site
  T a[n];
  int m = 10; // runtime chunk size for the schedule(..., m) variants
#pragma omp target teams distribute parallel for simd
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for simd schedule(static)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for simd schedule(static, m)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for simd schedule(dynamic)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for simd schedule(dynamic, m)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
  return 0;
}
134 
main(int argc,char ** argv)135 int main (int argc, char **argv) {
136   int n = 100;
137   int a[n];
138   int m = 10;
139 #pragma omp target teams distribute parallel for simd
140   for(int i = 0; i < n; i++) {
141     a[i] = 0;
142   }
143 #pragma omp target teams distribute parallel for simd dist_schedule(static)
144   for(int i = 0; i < n; i++) {
145     a[i] = 0;
146   }
147 #pragma omp target teams distribute parallel for simd dist_schedule(static, m)
148   for(int i = 0; i < n; i++) {
149     a[i] = 0;
150   }
151 #pragma omp target teams distribute parallel for simd schedule(dynamic)
152   for(int i = 0; i < n; i++) {
153     a[i] = 0;
154   }
155 #pragma omp target teams distribute parallel for simd schedule(dynamic, m)
156   for(int i = 0; i < n; i++) {
157     a[i] = 0;
158   }
159   return tmain<int, 10>(argc);
160 }
161 
162 
163 
164 
165 
166 
167 
168 
169 
170 
171 
172 
173 
174 
175 
176 
177 
178 
179 
180 
181 
182 
183 
184 
185 
186 
187 
188 
189 
190 
191 
192 
193 
194 
195 #endif // CK2
196 #endif // #ifndef HEADER
// CHECK1-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK1-NEXT:    [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK1-NEXT:    ret i32 [[CALL]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK1-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK1-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK1-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1:       omp_offload.failed:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK1:       omp_offload.cont:
// CHECK1-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK1-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
// CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK1-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
// CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP13]], align 8
// CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK1:       omp_offload.failed7:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
// CHECK1:       omp_offload.cont8:
// CHECK1-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK1-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
// CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK1-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
// CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP22]], align 8
// CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK1-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK1:       omp_offload.failed14:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
// CHECK1:       omp_offload.cont15:
// CHECK1-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK1-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
// CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK1-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
// CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP31]], align 8
// CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK1-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK1:       omp_offload.failed21:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
// CHECK1:       omp_offload.cont22:
// CHECK1-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK1-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
// CHECK1-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK1-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
// CHECK1-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP40]], align 8
// CHECK1-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK1-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK1:       omp_offload.failed28:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
// CHECK1:       omp_offload.cont29:
// CHECK1-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT:    ret i32 [[TMP45]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
// CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !8
// CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
// CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !8
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
// CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !8
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1:       .omp.final.then:
// CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
// CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK1:       .omp.final.done:
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
// CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
474 // CHECK1-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12
475 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
476 // CHECK1:       omp.body.continue:
477 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
478 // CHECK1:       omp.inner.for.inc:
479 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
480 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
481 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
482 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
483 // CHECK1:       omp.inner.for.end:
484 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
485 // CHECK1:       omp.loop.exit:
486 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
487 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
488 // CHECK1-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
489 // CHECK1-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
490 // CHECK1:       .omp.final.then:
491 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
492 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
493 // CHECK1:       .omp.final.done:
494 // CHECK1-NEXT:    ret void
495 //
496 //
497 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
498 // CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
499 // CHECK1-NEXT:  entry:
500 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
501 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
502 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
503 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
504 // CHECK1-NEXT:    ret void
505 //
506 //
507 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
508 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
509 // CHECK1-NEXT:  entry:
510 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
511 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
512 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
513 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
514 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
515 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
516 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
517 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
518 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
519 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
520 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
521 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
522 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
523 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
524 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
525 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
526 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
527 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
528 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
529 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
530 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
531 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
532 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
533 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
534 // CHECK1:       cond.true:
535 // CHECK1-NEXT:    br label [[COND_END:%.*]]
536 // CHECK1:       cond.false:
537 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
538 // CHECK1-NEXT:    br label [[COND_END]]
539 // CHECK1:       cond.end:
540 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
541 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
542 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
543 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
544 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
545 // CHECK1:       omp.inner.for.cond:
546 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
547 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
548 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
549 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
550 // CHECK1:       omp.inner.for.body:
551 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
552 // CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
553 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
554 // CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
555 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !17
556 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
557 // CHECK1:       omp.inner.for.inc:
558 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
559 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
560 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
561 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
562 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
563 // CHECK1:       omp.inner.for.end:
564 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
565 // CHECK1:       omp.loop.exit:
566 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
567 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
568 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
569 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
570 // CHECK1:       .omp.final.then:
571 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
572 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
573 // CHECK1:       .omp.final.done:
574 // CHECK1-NEXT:    ret void
575 //
576 //
577 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
578 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
579 // CHECK1-NEXT:  entry:
580 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
581 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
582 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
583 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
584 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
585 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
586 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
587 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
588 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
589 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
590 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
591 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
592 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
593 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
594 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
595 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
596 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
597 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
598 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
599 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
600 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
601 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
602 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
603 // CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
604 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
605 // CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
606 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
607 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
608 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
609 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
610 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
611 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
612 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
613 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
614 // CHECK1:       cond.true:
615 // CHECK1-NEXT:    br label [[COND_END:%.*]]
616 // CHECK1:       cond.false:
617 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
618 // CHECK1-NEXT:    br label [[COND_END]]
619 // CHECK1:       cond.end:
620 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
621 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
622 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
623 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
624 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
625 // CHECK1:       omp.inner.for.cond:
626 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
627 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
628 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
629 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
630 // CHECK1:       omp.inner.for.body:
631 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
632 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
633 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
634 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !20
635 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
636 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !20
637 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
638 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
639 // CHECK1-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !20
640 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
641 // CHECK1:       omp.body.continue:
642 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
643 // CHECK1:       omp.inner.for.inc:
644 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
645 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
646 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
647 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
648 // CHECK1:       omp.inner.for.end:
649 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
650 // CHECK1:       omp.loop.exit:
651 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
652 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
653 // CHECK1-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
654 // CHECK1-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
655 // CHECK1:       .omp.final.then:
656 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
657 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
658 // CHECK1:       .omp.final.done:
659 // CHECK1-NEXT:    ret void
660 //
661 //
662 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
663 // CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
664 // CHECK1-NEXT:  entry:
665 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
666 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
667 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
668 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
669 // CHECK1-NEXT:    ret void
670 //
671 //
672 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
673 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
674 // CHECK1-NEXT:  entry:
675 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
676 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
677 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
678 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
679 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
680 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
681 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
682 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
683 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
684 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
685 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
686 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
687 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
688 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
689 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
690 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
691 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
692 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
693 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
694 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
695 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
696 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
697 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
698 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
699 // CHECK1:       cond.true:
700 // CHECK1-NEXT:    br label [[COND_END:%.*]]
701 // CHECK1:       cond.false:
702 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
703 // CHECK1-NEXT:    br label [[COND_END]]
704 // CHECK1:       cond.end:
705 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
706 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
707 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
708 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
709 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
710 // CHECK1:       omp.inner.for.cond:
711 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
712 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
713 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
714 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
715 // CHECK1:       omp.inner.for.body:
716 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
717 // CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
718 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
719 // CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
720 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !23
721 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
722 // CHECK1:       omp.inner.for.inc:
723 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
724 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
725 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
726 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
727 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
728 // CHECK1:       omp.inner.for.end:
729 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
730 // CHECK1:       omp.loop.exit:
731 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
732 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
733 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
734 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
735 // CHECK1:       .omp.final.then:
736 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
737 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
738 // CHECK1:       .omp.final.done:
739 // CHECK1-NEXT:    ret void
740 //
741 //
742 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
743 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
744 // CHECK1-NEXT:  entry:
745 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
746 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
747 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
748 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
749 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
750 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
751 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
752 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
753 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
754 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
755 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
756 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
757 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
758 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
759 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
760 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
761 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
762 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
763 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
764 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
765 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
766 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
767 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
768 // CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
769 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
770 // CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
771 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
772 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
773 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
774 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
775 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
776 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
777 // CHECK1:       omp.dispatch.cond:
778 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
779 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
780 // CHECK1-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
781 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
782 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
783 // CHECK1:       cond.true:
784 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
785 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
786 // CHECK1-NEXT:    br label [[COND_END:%.*]]
787 // CHECK1:       cond.false:
788 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
789 // CHECK1-NEXT:    br label [[COND_END]]
790 // CHECK1:       cond.end:
791 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
792 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
793 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
794 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
795 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
796 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
797 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
798 // CHECK1-NEXT:    br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
799 // CHECK1:       omp.dispatch.body:
800 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
801 // CHECK1:       omp.inner.for.cond:
802 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
803 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
804 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
805 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
806 // CHECK1:       omp.inner.for.body:
807 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
808 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
809 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
810 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !26
811 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
812 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !26
813 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
814 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
815 // CHECK1-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
816 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
817 // CHECK1:       omp.body.continue:
818 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
819 // CHECK1:       omp.inner.for.inc:
820 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
821 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
822 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
823 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
824 // CHECK1:       omp.inner.for.end:
825 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
826 // CHECK1:       omp.dispatch.inc:
827 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
828 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
829 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
830 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
831 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
832 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
833 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
834 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
835 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
836 // CHECK1:       omp.dispatch.end:
837 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
838 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
839 // CHECK1-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
840 // CHECK1-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
841 // CHECK1:       .omp.final.then:
842 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
843 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
844 // CHECK1:       .omp.final.done:
845 // CHECK1-NEXT:    ret void
846 //
847 //
848 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
849 // CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
850 // CHECK1-NEXT:  entry:
851 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
852 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
853 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
854 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
855 // CHECK1-NEXT:    ret void
856 //
857 //
858 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
859 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
860 // CHECK1-NEXT:  entry:
861 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
862 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
863 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
864 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
865 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
866 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
867 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
868 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
869 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
870 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
871 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
872 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
873 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
874 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
875 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
876 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
877 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
878 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
879 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
880 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
881 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
882 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
883 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
884 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
885 // CHECK1:       cond.true:
886 // CHECK1-NEXT:    br label [[COND_END:%.*]]
887 // CHECK1:       cond.false:
888 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
889 // CHECK1-NEXT:    br label [[COND_END]]
890 // CHECK1:       cond.end:
891 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
892 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
893 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
894 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
895 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
896 // CHECK1:       omp.inner.for.cond:
897 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
898 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
899 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
900 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
901 // CHECK1:       omp.inner.for.body:
902 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
903 // CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
904 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
905 // CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
906 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !29
907 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
908 // CHECK1:       omp.inner.for.inc:
909 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
910 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
911 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
912 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
913 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
914 // CHECK1:       omp.inner.for.end:
915 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
916 // CHECK1:       omp.loop.exit:
917 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
918 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
919 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
920 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
921 // CHECK1:       .omp.final.then:
922 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
923 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
924 // CHECK1:       .omp.final.done:
925 // CHECK1-NEXT:    ret void
926 //
927 //
928 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
929 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
930 // CHECK1-NEXT:  entry:
931 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
932 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
933 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
934 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
935 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
936 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
937 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
938 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
939 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
940 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
941 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
942 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
943 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
944 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
945 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
946 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
947 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
948 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
949 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
950 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
951 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
952 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
953 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
954 // CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
955 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
956 // CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
957 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
958 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
959 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
960 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
961 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
962 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
963 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
964 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
965 // CHECK1:       omp.dispatch.cond:
966 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
967 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
968 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
969 // CHECK1:       omp.dispatch.body:
970 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
971 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
972 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
973 // CHECK1:       omp.inner.for.cond:
974 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
975 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
976 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
977 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
978 // CHECK1:       omp.inner.for.body:
979 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
980 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
981 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
982 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !32
983 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
984 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !32
985 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
986 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
987 // CHECK1-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
988 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
989 // CHECK1:       omp.body.continue:
990 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
991 // CHECK1:       omp.inner.for.inc:
992 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
993 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
994 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
995 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
996 // CHECK1:       omp.inner.for.end:
997 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
998 // CHECK1:       omp.dispatch.inc:
999 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
1000 // CHECK1:       omp.dispatch.end:
1001 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1002 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1003 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1004 // CHECK1:       .omp.final.then:
1005 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
1006 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1007 // CHECK1:       .omp.final.done:
1008 // CHECK1-NEXT:    ret void
1009 //
1010 //
1011 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
1012 // CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1013 // CHECK1-NEXT:  entry:
1014 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1015 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1016 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1017 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
1018 // CHECK1-NEXT:    ret void
1019 //
1020 //
1021 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
1022 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1023 // CHECK1-NEXT:  entry:
1024 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1025 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1026 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1027 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1028 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1029 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1030 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1031 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1032 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1033 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1034 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1035 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1036 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1037 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1038 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1039 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
1040 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1041 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1042 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1043 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1044 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1045 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1046 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1047 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1048 // CHECK1:       cond.true:
1049 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1050 // CHECK1:       cond.false:
1051 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1052 // CHECK1-NEXT:    br label [[COND_END]]
1053 // CHECK1:       cond.end:
1054 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1055 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1056 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1057 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1058 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1059 // CHECK1:       omp.inner.for.cond:
1060 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1061 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
1062 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1063 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1064 // CHECK1:       omp.inner.for.body:
1065 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
1066 // CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1067 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
1068 // CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1069 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !35
1070 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1071 // CHECK1:       omp.inner.for.inc:
1072 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1073 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
1074 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1075 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1076 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
1077 // CHECK1:       omp.inner.for.end:
1078 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1079 // CHECK1:       omp.loop.exit:
1080 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1081 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1082 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1083 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1084 // CHECK1:       .omp.final.then:
1085 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
1086 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1087 // CHECK1:       .omp.final.done:
1088 // CHECK1-NEXT:    ret void
1089 //
1090 //
1091 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15
1092 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1093 // CHECK1-NEXT:  entry:
1094 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1095 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1096 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1097 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1098 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1099 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1100 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1101 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1102 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1103 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1104 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1105 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1106 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1107 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1108 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1109 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1110 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1111 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1112 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1113 // CHECK1-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
1114 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1115 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1116 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1117 // CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1118 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1119 // CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
1120 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1121 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1122 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1123 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1124 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1125 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1126 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
1127 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
1128 // CHECK1:       omp.dispatch.cond:
1129 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
1130 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
1131 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1132 // CHECK1:       omp.dispatch.body:
1133 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1134 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
1135 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1136 // CHECK1:       omp.inner.for.cond:
1137 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1138 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
1139 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1140 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1141 // CHECK1:       omp.inner.for.body:
1142 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1143 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
1144 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1145 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !38
1146 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
1147 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !38
1148 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
1149 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
1150 // CHECK1-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
1151 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1152 // CHECK1:       omp.body.continue:
1153 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1154 // CHECK1:       omp.inner.for.inc:
1155 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1156 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
1157 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1158 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
1159 // CHECK1:       omp.inner.for.end:
1160 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
1161 // CHECK1:       omp.dispatch.inc:
1162 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
1163 // CHECK1:       omp.dispatch.end:
1164 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1165 // CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1166 // CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1167 // CHECK1:       .omp.final.then:
1168 // CHECK1-NEXT:    store i32 123, i32* [[I]], align 4
1169 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1170 // CHECK1:       .omp.final.done:
1171 // CHECK1-NEXT:    ret void
1172 //
1173 //
1174 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1175 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
1176 // CHECK1-NEXT:  entry:
1177 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
1178 // CHECK1-NEXT:    ret void
1179 //
1180 //
1181 // CHECK2-LABEL: define {{[^@]+}}@_Z21teams_template_structv
1182 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
1183 // CHECK2-NEXT:  entry:
1184 // CHECK2-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
1185 // CHECK2-NEXT:    [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
1186 // CHECK2-NEXT:    ret i32 [[CALL]]
1187 //
1188 //
1189 // CHECK2-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
1190 // CHECK2-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
1191 // CHECK2-NEXT:  entry:
1192 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1193 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
1194 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
1195 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
1196 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1197 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
1198 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
1199 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
1200 // CHECK2-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
1201 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
1202 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
1203 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
1204 // CHECK2-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
1205 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
1206 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
1207 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
1208 // CHECK2-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
1209 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
1210 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
1211 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
1212 // CHECK2-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
1213 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1214 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1215 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
1216 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1217 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
1218 // CHECK2-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
1219 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1220 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
1221 // CHECK2-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
1222 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1223 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1224 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1225 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1226 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
1227 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1228 // CHECK2-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1229 // CHECK2-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1230 // CHECK2:       omp_offload.failed:
1231 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
1232 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1233 // CHECK2:       omp_offload.cont:
1234 // CHECK2-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
1235 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
1236 // CHECK2-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
1237 // CHECK2-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
1238 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
1239 // CHECK2-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
1240 // CHECK2-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
1241 // CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
1242 // CHECK2-NEXT:    store i8* null, i8** [[TMP13]], align 8
1243 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
1244 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
1245 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
1246 // CHECK2-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1247 // CHECK2-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
1248 // CHECK2-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
1249 // CHECK2:       omp_offload.failed7:
1250 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
1251 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
1252 // CHECK2:       omp_offload.cont8:
1253 // CHECK2-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
1254 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
1255 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
1256 // CHECK2-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
1257 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
1258 // CHECK2-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
1259 // CHECK2-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
1260 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
1261 // CHECK2-NEXT:    store i8* null, i8** [[TMP22]], align 8
1262 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
1263 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
1264 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
1265 // CHECK2-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1266 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1267 // CHECK2-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
1268 // CHECK2:       omp_offload.failed14:
1269 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
1270 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
1271 // CHECK2:       omp_offload.cont15:
1272 // CHECK2-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
1273 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
1274 // CHECK2-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
1275 // CHECK2-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
1276 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
1277 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
1278 // CHECK2-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
1279 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
1280 // CHECK2-NEXT:    store i8* null, i8** [[TMP31]], align 8
1281 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
1282 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
1283 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
1284 // CHECK2-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1285 // CHECK2-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
1286 // CHECK2-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
1287 // CHECK2:       omp_offload.failed21:
1288 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
1289 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
1290 // CHECK2:       omp_offload.cont22:
1291 // CHECK2-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
1292 // CHECK2-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
1293 // CHECK2-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
1294 // CHECK2-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
1295 // CHECK2-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
1296 // CHECK2-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
1297 // CHECK2-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
1298 // CHECK2-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
1299 // CHECK2-NEXT:    store i8* null, i8** [[TMP40]], align 8
1300 // CHECK2-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
1301 // CHECK2-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
1302 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
1303 // CHECK2-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1304 // CHECK2-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
1305 // CHECK2-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
1306 // CHECK2:       omp_offload.failed28:
1307 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
1308 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
1309 // CHECK2:       omp_offload.cont29:
1310 // CHECK2-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
1311 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
1312 // CHECK2-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
1313 // CHECK2-NEXT:    ret i32 [[TMP45]]
1314 //
1315 //
1316 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
1317 // CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
1318 // CHECK2-NEXT:  entry:
1319 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1320 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1321 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1322 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
1323 // CHECK2-NEXT:    ret void
1324 //
1325 //
1326 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
1327 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1328 // CHECK2-NEXT:  entry:
1329 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1330 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1331 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1332 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1333 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1334 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1335 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1336 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1337 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1338 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1339 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1340 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1341 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1342 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1343 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1344 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
1345 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1346 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1347 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1348 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1349 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1350 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1351 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1352 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1353 // CHECK2:       cond.true:
1354 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1355 // CHECK2:       cond.false:
1356 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1357 // CHECK2-NEXT:    br label [[COND_END]]
1358 // CHECK2:       cond.end:
1359 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1360 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1361 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1362 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1363 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1364 // CHECK2:       omp.inner.for.cond:
1365 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
1366 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
1367 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1368 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1369 // CHECK2:       omp.inner.for.body:
1370 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !8
1371 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1372 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
1373 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1374 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !8
1375 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1376 // CHECK2:       omp.inner.for.inc:
1377 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
1378 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !8
1379 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1380 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
1381 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
1382 // CHECK2:       omp.inner.for.end:
1383 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1384 // CHECK2:       omp.loop.exit:
1385 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1386 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1387 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1388 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1389 // CHECK2:       .omp.final.then:
1390 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1391 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1392 // CHECK2:       .omp.final.done:
1393 // CHECK2-NEXT:    ret void
1394 //
1395 //
1396 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
1397 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1398 // CHECK2-NEXT:  entry:
1399 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1400 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1401 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1402 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1403 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1404 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1405 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1406 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1407 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1408 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1409 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1410 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1411 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1412 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1413 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1414 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1415 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1416 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1417 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1418 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
1419 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1420 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1421 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1422 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1423 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1424 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
1425 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1426 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1427 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1428 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
1429 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1430 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1431 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
1432 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1433 // CHECK2:       cond.true:
1434 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1435 // CHECK2:       cond.false:
1436 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1437 // CHECK2-NEXT:    br label [[COND_END]]
1438 // CHECK2:       cond.end:
1439 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1440 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1441 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1442 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
1443 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1444 // CHECK2:       omp.inner.for.cond:
1445 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
1446 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
1447 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1448 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1449 // CHECK2:       omp.inner.for.body:
1450 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
1451 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1452 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1453 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
1454 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
1455 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
1456 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
1457 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
1458 // CHECK2-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12
1459 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1460 // CHECK2:       omp.body.continue:
1461 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1462 // CHECK2:       omp.inner.for.inc:
1463 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
1464 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
1465 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
1466 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
1467 // CHECK2:       omp.inner.for.end:
1468 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1469 // CHECK2:       omp.loop.exit:
1470 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1471 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1472 // CHECK2-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1473 // CHECK2-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1474 // CHECK2:       .omp.final.then:
1475 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1476 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1477 // CHECK2:       .omp.final.done:
1478 // CHECK2-NEXT:    ret void
1479 //
1480 //
1481 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
1482 // CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1483 // CHECK2-NEXT:  entry:
1484 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1485 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1486 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1487 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
1488 // CHECK2-NEXT:    ret void
1489 //
1490 //
1491 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
1492 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1493 // CHECK2-NEXT:  entry:
1494 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1495 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1496 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1497 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1498 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1499 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1500 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1501 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1502 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1503 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1504 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1505 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1506 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1507 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1508 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1509 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
1510 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1511 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1512 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1513 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1514 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1515 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1516 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1517 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1518 // CHECK2:       cond.true:
1519 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1520 // CHECK2:       cond.false:
1521 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1522 // CHECK2-NEXT:    br label [[COND_END]]
1523 // CHECK2:       cond.end:
1524 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1525 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1526 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1527 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1528 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1529 // CHECK2:       omp.inner.for.cond:
1530 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
1531 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
1532 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1533 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1534 // CHECK2:       omp.inner.for.body:
1535 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
1536 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1537 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
1538 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1539 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !17
1540 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1541 // CHECK2:       omp.inner.for.inc:
1542 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
1543 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
1544 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1545 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
1546 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
1547 // CHECK2:       omp.inner.for.end:
1548 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1549 // CHECK2:       omp.loop.exit:
1550 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1551 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1552 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1553 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1554 // CHECK2:       .omp.final.then:
1555 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1556 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1557 // CHECK2:       .omp.final.done:
1558 // CHECK2-NEXT:    ret void
1559 //
1560 //
1561 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
1562 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1563 // CHECK2-NEXT:  entry:
1564 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1565 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1566 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1567 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1568 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1569 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1570 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1571 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1572 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1573 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1574 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1575 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1576 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1577 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1578 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1579 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1580 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1581 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1582 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1583 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
1584 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1585 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1586 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1587 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1588 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1589 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
1590 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1591 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1592 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1593 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
1594 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1595 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1596 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
1597 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1598 // CHECK2:       cond.true:
1599 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1600 // CHECK2:       cond.false:
1601 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1602 // CHECK2-NEXT:    br label [[COND_END]]
1603 // CHECK2:       cond.end:
1604 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1605 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1606 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1607 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
1608 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1609 // CHECK2:       omp.inner.for.cond:
1610 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
1611 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
1612 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1613 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1614 // CHECK2:       omp.inner.for.body:
1615 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
1616 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1617 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1618 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !20
1619 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
1620 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !20
1621 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
1622 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
1623 // CHECK2-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !20
1624 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1625 // CHECK2:       omp.body.continue:
1626 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1627 // CHECK2:       omp.inner.for.inc:
1628 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
1629 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
1630 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
1631 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
1632 // CHECK2:       omp.inner.for.end:
1633 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1634 // CHECK2:       omp.loop.exit:
1635 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1636 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1637 // CHECK2-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1638 // CHECK2-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1639 // CHECK2:       .omp.final.then:
1640 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1641 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1642 // CHECK2:       .omp.final.done:
1643 // CHECK2-NEXT:    ret void
1644 //
1645 //
1646 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
1647 // CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1648 // CHECK2-NEXT:  entry:
1649 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1650 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1651 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1652 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
1653 // CHECK2-NEXT:    ret void
1654 //
1655 //
1656 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
1657 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1658 // CHECK2-NEXT:  entry:
1659 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1660 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1661 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1662 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1663 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1664 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1665 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1666 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1667 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1668 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1669 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1670 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1671 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1672 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1673 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1674 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
1675 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1676 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1677 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1678 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1679 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1680 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1681 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1682 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1683 // CHECK2:       cond.true:
1684 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1685 // CHECK2:       cond.false:
1686 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1687 // CHECK2-NEXT:    br label [[COND_END]]
1688 // CHECK2:       cond.end:
1689 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1690 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1691 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1692 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1693 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1694 // CHECK2:       omp.inner.for.cond:
1695 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
1696 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
1697 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1698 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1699 // CHECK2:       omp.inner.for.body:
1700 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
1701 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1702 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
1703 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1704 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !23
1705 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1706 // CHECK2:       omp.inner.for.inc:
1707 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
1708 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
1709 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1710 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
1711 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
1712 // CHECK2:       omp.inner.for.end:
1713 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1714 // CHECK2:       omp.loop.exit:
1715 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1716 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1717 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1718 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1719 // CHECK2:       .omp.final.then:
1720 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1721 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1722 // CHECK2:       .omp.final.done:
1723 // CHECK2-NEXT:    ret void
1724 //
1725 //
1726 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
1727 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1728 // CHECK2-NEXT:  entry:
1729 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1730 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1731 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1732 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1733 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1734 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1735 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1736 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1737 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1738 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1739 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1740 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1741 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1742 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1743 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1744 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1745 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1746 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1747 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1748 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
1749 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1750 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1751 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1752 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1753 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1754 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
1755 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1756 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1757 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1758 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
1759 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
1760 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
1761 // CHECK2:       omp.dispatch.cond:
1762 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1763 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1764 // CHECK2-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
1765 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
1766 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1767 // CHECK2:       cond.true:
1768 // CHECK2-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1769 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
1770 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1771 // CHECK2:       cond.false:
1772 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1773 // CHECK2-NEXT:    br label [[COND_END]]
1774 // CHECK2:       cond.end:
1775 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
1776 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1777 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1778 // CHECK2-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
1779 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1780 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1781 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
1782 // CHECK2-NEXT:    br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1783 // CHECK2:       omp.dispatch.body:
1784 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1785 // CHECK2:       omp.inner.for.cond:
1786 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
1787 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
1788 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
1789 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1790 // CHECK2:       omp.inner.for.body:
1791 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
1792 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
1793 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1794 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !26
1795 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
1796 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !26
1797 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
1798 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
1799 // CHECK2-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
1800 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1801 // CHECK2:       omp.body.continue:
1802 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1803 // CHECK2:       omp.inner.for.inc:
1804 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
1805 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
1806 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
1807 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
1808 // CHECK2:       omp.inner.for.end:
1809 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
1810 // CHECK2:       omp.dispatch.inc:
1811 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1812 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1813 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
1814 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
1815 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1816 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1817 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
1818 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
1819 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
1820 // CHECK2:       omp.dispatch.end:
1821 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1822 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1823 // CHECK2-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
1824 // CHECK2-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1825 // CHECK2:       .omp.final.then:
1826 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1827 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1828 // CHECK2:       .omp.final.done:
1829 // CHECK2-NEXT:    ret void
1830 //
1831 //
1832 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
1833 // CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1834 // CHECK2-NEXT:  entry:
1835 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1836 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1837 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1838 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
1839 // CHECK2-NEXT:    ret void
1840 //
1841 //
1842 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
1843 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1844 // CHECK2-NEXT:  entry:
1845 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1846 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1847 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1848 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1849 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1850 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1851 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1852 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1853 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1854 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1855 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1856 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1857 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1858 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1859 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1860 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
1861 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1862 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1863 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1864 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1865 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1866 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1867 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1868 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1869 // CHECK2:       cond.true:
1870 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1871 // CHECK2:       cond.false:
1872 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1873 // CHECK2-NEXT:    br label [[COND_END]]
1874 // CHECK2:       cond.end:
1875 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1876 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1877 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1878 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1879 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1880 // CHECK2:       omp.inner.for.cond:
1881 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
1882 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
1883 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1884 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1885 // CHECK2:       omp.inner.for.body:
1886 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
1887 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1888 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
1889 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1890 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !29
1891 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1892 // CHECK2:       omp.inner.for.inc:
1893 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
1894 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
1895 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1896 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
1897 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
1898 // CHECK2:       omp.inner.for.end:
1899 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1900 // CHECK2:       omp.loop.exit:
1901 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1902 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1903 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1904 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1905 // CHECK2:       .omp.final.then:
1906 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1907 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1908 // CHECK2:       .omp.final.done:
1909 // CHECK2-NEXT:    ret void
1910 //
1911 //
1912 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11
1913 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1914 // CHECK2-NEXT:  entry:
1915 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1916 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1917 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1918 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1919 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1920 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1921 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1922 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1923 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1924 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1925 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1926 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
1927 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1928 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1929 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1930 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1931 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1932 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1933 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1934 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
1935 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1936 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1937 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1938 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1939 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1940 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
1941 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1942 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1943 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1944 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1945 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1946 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1947 // CHECK2-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
1948 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
1949 // CHECK2:       omp.dispatch.cond:
1950 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
1951 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
1952 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1953 // CHECK2:       omp.dispatch.body:
1954 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1955 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
1956 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1957 // CHECK2:       omp.inner.for.cond:
1958 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1959 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
1960 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1961 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1962 // CHECK2:       omp.inner.for.body:
1963 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1964 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
1965 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1966 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !32
1967 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
1968 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !32
1969 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
1970 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
1971 // CHECK2-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
1972 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1973 // CHECK2:       omp.body.continue:
1974 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1975 // CHECK2:       omp.inner.for.inc:
1976 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1977 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
1978 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1979 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
1980 // CHECK2:       omp.inner.for.end:
1981 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
1982 // CHECK2:       omp.dispatch.inc:
1983 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
1984 // CHECK2:       omp.dispatch.end:
1985 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1986 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1987 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1988 // CHECK2:       .omp.final.then:
1989 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
1990 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1991 // CHECK2:       .omp.final.done:
1992 // CHECK2-NEXT:    ret void
1993 //
1994 //
1995 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
1996 // CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
1997 // CHECK2-NEXT:  entry:
1998 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1999 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
2000 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
2001 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
2002 // CHECK2-NEXT:    ret void
2003 //
2004 //
2005 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14
2006 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2007 // CHECK2-NEXT:  entry:
2008 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2009 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2010 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
2011 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2012 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2013 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2014 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2015 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2016 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2017 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2018 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2019 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2020 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
2021 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
2022 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2023 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
2024 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2025 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2026 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2027 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2028 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2029 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2030 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2031 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2032 // CHECK2:       cond.true:
2033 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2034 // CHECK2:       cond.false:
2035 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2036 // CHECK2-NEXT:    br label [[COND_END]]
2037 // CHECK2:       cond.end:
2038 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2039 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2040 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2041 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2042 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2043 // CHECK2:       omp.inner.for.cond:
2044 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2045 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
2046 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2047 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2048 // CHECK2:       omp.inner.for.body:
2049 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
2050 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2051 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
2052 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2053 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !35
2054 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2055 // CHECK2:       omp.inner.for.inc:
2056 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2057 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
2058 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2059 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2060 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
2061 // CHECK2:       omp.inner.for.end:
2062 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2063 // CHECK2:       omp.loop.exit:
2064 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2065 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2066 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2067 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2068 // CHECK2:       .omp.final.then:
2069 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
2070 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2071 // CHECK2:       .omp.final.done:
2072 // CHECK2-NEXT:    ret void
2073 //
2074 //
2075 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15
2076 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2077 // CHECK2-NEXT:  entry:
2078 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2079 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2080 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2081 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2082 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
2083 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2084 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2085 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2086 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2087 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2088 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2089 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2090 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2091 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2092 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2093 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2094 // CHECK2-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
2095 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
2096 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2097 // CHECK2-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
2098 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2099 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2100 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2101 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2102 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2103 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
2104 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2105 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2106 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2107 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2108 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2109 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
2110 // CHECK2-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
2111 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2112 // CHECK2:       omp.dispatch.cond:
2113 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2114 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
2115 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2116 // CHECK2:       omp.dispatch.body:
2117 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2118 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
2119 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2120 // CHECK2:       omp.inner.for.cond:
2121 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
2122 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
2123 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2124 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2125 // CHECK2:       omp.inner.for.body:
2126 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
2127 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
2128 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2129 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !38
2130 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
2131 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !38
2132 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
2133 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
2134 // CHECK2-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
2135 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2136 // CHECK2:       omp.body.continue:
2137 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2138 // CHECK2:       omp.inner.for.inc:
2139 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
2140 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
2141 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
2142 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
2143 // CHECK2:       omp.inner.for.end:
2144 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2145 // CHECK2:       omp.dispatch.inc:
2146 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
2147 // CHECK2:       omp.dispatch.end:
2148 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2149 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2150 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2151 // CHECK2:       .omp.final.then:
2152 // CHECK2-NEXT:    store i32 123, i32* [[I]], align 4
2153 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2154 // CHECK2:       .omp.final.done:
2155 // CHECK2-NEXT:    ret void
2156 //
2157 //
2158 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2159 // CHECK2-SAME: () #[[ATTR3:[0-9]+]] {
2160 // CHECK2-NEXT:  entry:
2161 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
2162 // CHECK2-NEXT:    ret void
2163 //
2164 //
2165 // CHECK3-LABEL: define {{[^@]+}}@_Z21teams_template_structv
2166 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
2167 // CHECK3-NEXT:  entry:
2168 // CHECK3-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
2169 // CHECK3-NEXT:    [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
2170 // CHECK3-NEXT:    ret i32 [[CALL]]
2171 //
2172 //
2173 // CHECK3-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
2174 // CHECK3-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
2175 // CHECK3-NEXT:  entry:
2176 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2177 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
2178 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
2179 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
2180 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2181 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
2182 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
2183 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
2184 // CHECK3-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
2185 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
2186 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
2187 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
2188 // CHECK3-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
2189 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
2190 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
2191 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
2192 // CHECK3-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
2193 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
2194 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
2195 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
2196 // CHECK3-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
2197 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2198 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2199 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
2200 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2201 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
2202 // CHECK3-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
2203 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2204 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
2205 // CHECK3-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
2206 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2207 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2208 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2209 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2210 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
2211 // CHECK3-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2212 // CHECK3-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
2213 // CHECK3-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2214 // CHECK3:       omp_offload.failed:
2215 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
2216 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2217 // CHECK3:       omp_offload.cont:
2218 // CHECK3-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
2219 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
2220 // CHECK3-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
2221 // CHECK3-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
2222 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
2223 // CHECK3-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
2224 // CHECK3-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
2225 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
2226 // CHECK3-NEXT:    store i8* null, i8** [[TMP13]], align 4
2227 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
2228 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
2229 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
2230 // CHECK3-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2231 // CHECK3-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
2232 // CHECK3-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
2233 // CHECK3:       omp_offload.failed7:
2234 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
2235 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
2236 // CHECK3:       omp_offload.cont8:
2237 // CHECK3-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
2238 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
2239 // CHECK3-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
2240 // CHECK3-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
2241 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
2242 // CHECK3-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
2243 // CHECK3-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
2244 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
2245 // CHECK3-NEXT:    store i8* null, i8** [[TMP22]], align 4
2246 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
2247 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
2248 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
2249 // CHECK3-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2250 // CHECK3-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2251 // CHECK3-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
2252 // CHECK3:       omp_offload.failed14:
2253 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
2254 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
2255 // CHECK3:       omp_offload.cont15:
2256 // CHECK3-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
2257 // CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
2258 // CHECK3-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
2259 // CHECK3-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
2260 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
2261 // CHECK3-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
2262 // CHECK3-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
2263 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
2264 // CHECK3-NEXT:    store i8* null, i8** [[TMP31]], align 4
2265 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
2266 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
2267 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
2268 // CHECK3-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2269 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
2270 // CHECK3-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
2271 // CHECK3:       omp_offload.failed21:
2272 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
2273 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
2274 // CHECK3:       omp_offload.cont22:
2275 // CHECK3-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
2276 // CHECK3-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
2277 // CHECK3-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
2278 // CHECK3-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
2279 // CHECK3-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
2280 // CHECK3-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
2281 // CHECK3-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
2282 // CHECK3-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
2283 // CHECK3-NEXT:    store i8* null, i8** [[TMP40]], align 4
2284 // CHECK3-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
2285 // CHECK3-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
2286 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
2287 // CHECK3-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2288 // CHECK3-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
2289 // CHECK3-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
2290 // CHECK3:       omp_offload.failed28:
2291 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
2292 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
2293 // CHECK3:       omp_offload.cont29:
2294 // CHECK3-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
2295 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
2296 // CHECK3-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
2297 // CHECK3-NEXT:    ret i32 [[TMP45]]
2298 //
2299 //
2300 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
2301 // CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
2302 // CHECK3-NEXT:  entry:
2303 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2304 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2305 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2306 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
2307 // CHECK3-NEXT:    ret void
2308 //
2309 //
2310 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
2311 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2312 // CHECK3-NEXT:  entry:
2313 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2314 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2315 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2316 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2317 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2318 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2319 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2320 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2321 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2322 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2323 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2324 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2325 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2326 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2327 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2328 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
2329 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2330 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2331 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2332 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2333 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2334 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2335 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2336 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2337 // CHECK3:       cond.true:
2338 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2339 // CHECK3:       cond.false:
2340 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2341 // CHECK3-NEXT:    br label [[COND_END]]
2342 // CHECK3:       cond.end:
2343 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2344 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2345 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2346 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2347 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2348 // CHECK3:       omp.inner.for.cond:
2349 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
2350 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
2351 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2352 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2353 // CHECK3:       omp.inner.for.body:
2354 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !9
2355 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
2356 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !9
2357 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2358 // CHECK3:       omp.inner.for.inc:
2359 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
2360 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !9
2361 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2362 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
2363 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
2364 // CHECK3:       omp.inner.for.end:
2365 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2366 // CHECK3:       omp.loop.exit:
2367 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2368 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2369 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2370 // CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2371 // CHECK3:       .omp.final.then:
2372 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2373 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2374 // CHECK3:       .omp.final.done:
2375 // CHECK3-NEXT:    ret void
2376 //
2377 //
2378 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
2379 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2380 // CHECK3-NEXT:  entry:
2381 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2382 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2383 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2384 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2385 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2386 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2387 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2388 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2389 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2390 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2391 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2392 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2393 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2394 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2395 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2396 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2397 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2398 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2399 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2400 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
2401 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2402 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2403 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2404 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2405 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2406 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2407 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2408 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2409 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2410 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2411 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
2412 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2413 // CHECK3:       cond.true:
2414 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2415 // CHECK3:       cond.false:
2416 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2417 // CHECK3-NEXT:    br label [[COND_END]]
2418 // CHECK3:       cond.end:
2419 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2420 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2421 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2422 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
2423 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2424 // CHECK3:       omp.inner.for.cond:
2425 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
2426 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
2427 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2428 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2429 // CHECK3:       omp.inner.for.body:
2430 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
2431 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2432 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2433 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
2434 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
2435 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
2436 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
2437 // CHECK3-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
2438 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2439 // CHECK3:       omp.body.continue:
2440 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2441 // CHECK3:       omp.inner.for.inc:
2442 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
2443 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
2444 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
2445 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
2446 // CHECK3:       omp.inner.for.end:
2447 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2448 // CHECK3:       omp.loop.exit:
2449 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2450 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2451 // CHECK3-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2452 // CHECK3-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2453 // CHECK3:       .omp.final.then:
2454 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2455 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2456 // CHECK3:       .omp.final.done:
2457 // CHECK3-NEXT:    ret void
2458 //
2459 //
2460 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
2461 // CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2462 // CHECK3-NEXT:  entry:
2463 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2464 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2465 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2466 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
2467 // CHECK3-NEXT:    ret void
2468 //
2469 //
2470 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
2471 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2472 // CHECK3-NEXT:  entry:
2473 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2474 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2475 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2476 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2477 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2478 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2479 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2480 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2481 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2482 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2483 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2484 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2485 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2486 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2487 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2488 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
2489 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2490 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2491 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2492 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2493 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2494 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2495 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2496 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2497 // CHECK3:       cond.true:
2498 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2499 // CHECK3:       cond.false:
2500 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2501 // CHECK3-NEXT:    br label [[COND_END]]
2502 // CHECK3:       cond.end:
2503 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2504 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2505 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2506 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2507 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2508 // CHECK3:       omp.inner.for.cond:
2509 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
2510 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
2511 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2512 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2513 // CHECK3:       omp.inner.for.body:
2514 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
2515 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
2516 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !18
2517 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2518 // CHECK3:       omp.inner.for.inc:
2519 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
2520 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
2521 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2522 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
2523 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
2524 // CHECK3:       omp.inner.for.end:
2525 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2526 // CHECK3:       omp.loop.exit:
2527 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2528 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2529 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2530 // CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2531 // CHECK3:       .omp.final.then:
2532 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2533 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2534 // CHECK3:       .omp.final.done:
2535 // CHECK3-NEXT:    ret void
2536 //
2537 //
2538 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
2539 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2540 // CHECK3-NEXT:  entry:
2541 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2542 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2543 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2544 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2545 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2546 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2547 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2548 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2549 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2550 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2551 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2552 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2553 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2554 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2555 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2556 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2557 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2558 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2559 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2560 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
2561 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2562 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2563 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2564 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2565 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2566 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2567 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2568 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2569 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2570 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2571 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
2572 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2573 // CHECK3:       cond.true:
2574 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2575 // CHECK3:       cond.false:
2576 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2577 // CHECK3-NEXT:    br label [[COND_END]]
2578 // CHECK3:       cond.end:
2579 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2580 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2581 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2582 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
2583 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2584 // CHECK3:       omp.inner.for.cond:
2585 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
2586 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
2587 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2588 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2589 // CHECK3:       omp.inner.for.body:
2590 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
2591 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2592 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2593 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
2594 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
2595 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
2596 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
2597 // CHECK3-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
2598 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2599 // CHECK3:       omp.body.continue:
2600 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2601 // CHECK3:       omp.inner.for.inc:
2602 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
2603 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
2604 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
2605 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
2606 // CHECK3:       omp.inner.for.end:
2607 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2608 // CHECK3:       omp.loop.exit:
2609 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2610 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2611 // CHECK3-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2612 // CHECK3-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2613 // CHECK3:       .omp.final.then:
2614 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2615 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2616 // CHECK3:       .omp.final.done:
2617 // CHECK3-NEXT:    ret void
2618 //
2619 //
2620 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
2621 // CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2622 // CHECK3-NEXT:  entry:
2623 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2624 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2625 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2626 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
2627 // CHECK3-NEXT:    ret void
2628 //
2629 //
2630 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
2631 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2632 // CHECK3-NEXT:  entry:
2633 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2634 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2635 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2636 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2637 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2638 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2639 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2640 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2641 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2642 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2643 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2644 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2645 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2646 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2647 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2648 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
2649 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2650 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2651 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2652 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2653 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2654 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2655 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2656 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2657 // CHECK3:       cond.true:
2658 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2659 // CHECK3:       cond.false:
2660 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2661 // CHECK3-NEXT:    br label [[COND_END]]
2662 // CHECK3:       cond.end:
2663 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2664 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2665 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2666 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2667 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2668 // CHECK3:       omp.inner.for.cond:
2669 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
2670 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
2671 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2672 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2673 // CHECK3:       omp.inner.for.body:
2674 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24
2675 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
2676 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !24
2677 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2678 // CHECK3:       omp.inner.for.inc:
2679 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
2680 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24
2681 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2682 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
2683 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
2684 // CHECK3:       omp.inner.for.end:
2685 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2686 // CHECK3:       omp.loop.exit:
2687 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2688 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2689 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2690 // CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2691 // CHECK3:       .omp.final.then:
2692 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2693 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2694 // CHECK3:       .omp.final.done:
2695 // CHECK3-NEXT:    ret void
2696 //
2697 //
2698 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
2699 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2700 // CHECK3-NEXT:  entry:
2701 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2702 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2703 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2704 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2705 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2706 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2707 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2708 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2709 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2710 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2711 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2712 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2713 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2714 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2715 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2716 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2717 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2718 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2719 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2720 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
2721 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2722 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2723 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2724 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2725 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2726 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2727 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2728 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2729 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
2730 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2731 // CHECK3:       omp.dispatch.cond:
2732 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2733 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2734 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
2735 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2736 // CHECK3:       cond.true:
2737 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2738 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2739 // CHECK3:       cond.false:
2740 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2741 // CHECK3-NEXT:    br label [[COND_END]]
2742 // CHECK3:       cond.end:
2743 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
2744 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2745 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2746 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
2747 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2748 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2749 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
2750 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2751 // CHECK3:       omp.dispatch.body:
2752 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2753 // CHECK3:       omp.inner.for.cond:
2754 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
2755 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
2756 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
2757 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2758 // CHECK3:       omp.inner.for.body:
2759 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
2760 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
2761 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2762 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
2763 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
2764 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !27
2765 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
2766 // CHECK3-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
2767 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2768 // CHECK3:       omp.body.continue:
2769 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2770 // CHECK3:       omp.inner.for.inc:
2771 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
2772 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
2773 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
2774 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
2775 // CHECK3:       omp.inner.for.end:
2776 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2777 // CHECK3:       omp.dispatch.inc:
2778 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2779 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2780 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
2781 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
2782 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2783 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2784 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
2785 // CHECK3-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
2786 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
2787 // CHECK3:       omp.dispatch.end:
2788 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2789 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2790 // CHECK3-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
2791 // CHECK3-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2792 // CHECK3:       .omp.final.then:
2793 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2794 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2795 // CHECK3:       .omp.final.done:
2796 // CHECK3-NEXT:    ret void
2797 //
2798 //
2799 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
2800 // CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2801 // CHECK3-NEXT:  entry:
2802 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2803 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2804 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2805 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
2806 // CHECK3-NEXT:    ret void
2807 //
2808 //
2809 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
2810 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2811 // CHECK3-NEXT:  entry:
2812 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2813 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2814 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2815 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2816 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2817 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2818 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2819 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2820 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2821 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2822 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2823 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2824 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2825 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2826 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2827 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
2828 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2829 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2830 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2831 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2832 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2833 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2834 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2835 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2836 // CHECK3:       cond.true:
2837 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2838 // CHECK3:       cond.false:
2839 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2840 // CHECK3-NEXT:    br label [[COND_END]]
2841 // CHECK3:       cond.end:
2842 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2843 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2844 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2845 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2846 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2847 // CHECK3:       omp.inner.for.cond:
2848 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
2849 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
2850 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2851 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2852 // CHECK3:       omp.inner.for.body:
2853 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30
2854 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
2855 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !30
2856 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2857 // CHECK3:       omp.inner.for.inc:
2858 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
2859 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30
2860 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2861 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
2862 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
2863 // CHECK3:       omp.inner.for.end:
2864 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2865 // CHECK3:       omp.loop.exit:
2866 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2867 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2868 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2869 // CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2870 // CHECK3:       .omp.final.then:
2871 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2872 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2873 // CHECK3:       .omp.final.done:
2874 // CHECK3-NEXT:    ret void
2875 //
2876 //
2877 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
2878 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2879 // CHECK3-NEXT:  entry:
2880 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2881 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2882 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2883 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2884 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2885 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2886 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2887 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2888 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2889 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2890 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2891 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2892 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2893 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2894 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2895 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2896 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2897 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2898 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2899 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
2900 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2901 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2902 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2903 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2904 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2905 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2906 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2907 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2908 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2909 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
2910 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
2911 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2912 // CHECK3:       omp.dispatch.cond:
2913 // CHECK3-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2914 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
2915 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2916 // CHECK3:       omp.dispatch.body:
2917 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2918 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
2919 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2920 // CHECK3:       omp.inner.for.cond:
2921 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
2922 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
2923 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2924 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2925 // CHECK3:       omp.inner.for.body:
2926 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
2927 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
2928 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2929 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
2930 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
2931 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !33
2932 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
2933 // CHECK3-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
2934 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2935 // CHECK3:       omp.body.continue:
2936 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2937 // CHECK3:       omp.inner.for.inc:
2938 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
2939 // CHECK3-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
2940 // CHECK3-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
2941 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
2942 // CHECK3:       omp.inner.for.end:
2943 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2944 // CHECK3:       omp.dispatch.inc:
2945 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
2946 // CHECK3:       omp.dispatch.end:
2947 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2948 // CHECK3-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2949 // CHECK3-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2950 // CHECK3:       .omp.final.then:
2951 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
2952 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2953 // CHECK3:       .omp.final.done:
2954 // CHECK3-NEXT:    ret void
2955 //
2956 //
2957 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
2958 // CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2959 // CHECK3-NEXT:  entry:
2960 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2961 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2962 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2963 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
2964 // CHECK3-NEXT:    ret void
2965 //
2966 //
2967 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
2968 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
2969 // CHECK3-NEXT:  entry:
2970 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2971 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2972 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2973 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2974 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2975 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2976 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2977 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2978 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2979 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2980 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2981 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2982 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2983 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2984 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2985 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
2986 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2987 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2988 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2989 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2990 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2991 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2992 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2993 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2994 // CHECK3:       cond.true:
2995 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2996 // CHECK3:       cond.false:
2997 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2998 // CHECK3-NEXT:    br label [[COND_END]]
2999 // CHECK3:       cond.end:
3000 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3001 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3002 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3003 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3004 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3005 // CHECK3:       omp.inner.for.cond:
3006 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
3007 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
3008 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3009 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3010 // CHECK3:       omp.inner.for.body:
3011 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !36
3012 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
3013 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !36
3014 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3015 // CHECK3:       omp.inner.for.inc:
3016 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
3017 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !36
3018 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3019 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
3020 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
3021 // CHECK3:       omp.inner.for.end:
3022 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3023 // CHECK3:       omp.loop.exit:
3024 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3025 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3026 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3027 // CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3028 // CHECK3:       .omp.final.then:
3029 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
3030 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3031 // CHECK3:       .omp.final.done:
3032 // CHECK3-NEXT:    ret void
3033 //
3034 //
3035 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15
3036 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3037 // CHECK3-NEXT:  entry:
3038 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3039 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3040 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3041 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3042 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3043 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3044 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3045 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3046 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3047 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3048 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3049 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3050 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3051 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3052 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3053 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3054 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3055 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3056 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3057 // CHECK3-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
3058 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3059 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3060 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
3061 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
3062 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3063 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3064 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3065 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3066 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3067 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
3068 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
3069 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
3070 // CHECK3:       omp.dispatch.cond:
3071 // CHECK3-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
3072 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
3073 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3074 // CHECK3:       omp.dispatch.body:
3075 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3076 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
3077 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3078 // CHECK3:       omp.inner.for.cond:
3079 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
3080 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
3081 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3082 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3083 // CHECK3:       omp.inner.for.body:
3084 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
3085 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
3086 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3087 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
3088 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
3089 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !39
3090 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
3091 // CHECK3-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !39
3092 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3093 // CHECK3:       omp.body.continue:
3094 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3095 // CHECK3:       omp.inner.for.inc:
3096 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
3097 // CHECK3-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
3098 // CHECK3-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
3099 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
3100 // CHECK3:       omp.inner.for.end:
3101 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
3102 // CHECK3:       omp.dispatch.inc:
3103 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
3104 // CHECK3:       omp.dispatch.end:
3105 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3106 // CHECK3-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
3107 // CHECK3-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3108 // CHECK3:       .omp.final.then:
3109 // CHECK3-NEXT:    store i32 123, i32* [[I]], align 4
3110 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3111 // CHECK3:       .omp.final.done:
3112 // CHECK3-NEXT:    ret void
3113 //
3114 //
3115 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
3116 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
3117 // CHECK3-NEXT:  entry:
3118 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
3119 // CHECK3-NEXT:    ret void
3120 //
3121 //
3122 // CHECK4-LABEL: define {{[^@]+}}@_Z21teams_template_structv
3123 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
3124 // CHECK4-NEXT:  entry:
3125 // CHECK4-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
3126 // CHECK4-NEXT:    [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
3127 // CHECK4-NEXT:    ret i32 [[CALL]]
3128 //
3129 //
3130 // CHECK4-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
3131 // CHECK4-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
3132 // CHECK4-NEXT:  entry:
3133 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3134 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
3135 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
3136 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
3137 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3138 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
3139 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
3140 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
3141 // CHECK4-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
3142 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
3143 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
3144 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
3145 // CHECK4-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
3146 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
3147 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
3148 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
3149 // CHECK4-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
3150 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
3151 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
3152 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
3153 // CHECK4-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
3154 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3155 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3156 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
3157 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3158 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
3159 // CHECK4-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
3160 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3161 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
3162 // CHECK4-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
3163 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3164 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
3165 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3166 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3167 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
3168 // CHECK4-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3169 // CHECK4-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
3170 // CHECK4-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3171 // CHECK4:       omp_offload.failed:
3172 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
3173 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3174 // CHECK4:       omp_offload.cont:
3175 // CHECK4-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
3176 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
3177 // CHECK4-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
3178 // CHECK4-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
3179 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
3180 // CHECK4-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
3181 // CHECK4-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
3182 // CHECK4-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
3183 // CHECK4-NEXT:    store i8* null, i8** [[TMP13]], align 4
3184 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
3185 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
3186 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
3187 // CHECK4-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3188 // CHECK4-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
3189 // CHECK4-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
3190 // CHECK4:       omp_offload.failed7:
3191 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
3192 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
3193 // CHECK4:       omp_offload.cont8:
3194 // CHECK4-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
3195 // CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
3196 // CHECK4-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
3197 // CHECK4-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
3198 // CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
3199 // CHECK4-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
3200 // CHECK4-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
3201 // CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
3202 // CHECK4-NEXT:    store i8* null, i8** [[TMP22]], align 4
3203 // CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
3204 // CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
3205 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
3206 // CHECK4-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3207 // CHECK4-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3208 // CHECK4-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
3209 // CHECK4:       omp_offload.failed14:
3210 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
3211 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
3212 // CHECK4:       omp_offload.cont15:
3213 // CHECK4-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
3214 // CHECK4-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
3215 // CHECK4-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
3216 // CHECK4-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
3217 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
3218 // CHECK4-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
3219 // CHECK4-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
3220 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
3221 // CHECK4-NEXT:    store i8* null, i8** [[TMP31]], align 4
3222 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
3223 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
3224 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
3225 // CHECK4-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3226 // CHECK4-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
3227 // CHECK4-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
3228 // CHECK4:       omp_offload.failed21:
3229 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
3230 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
3231 // CHECK4:       omp_offload.cont22:
3232 // CHECK4-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
3233 // CHECK4-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
3234 // CHECK4-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
3235 // CHECK4-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
3236 // CHECK4-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
3237 // CHECK4-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
3238 // CHECK4-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
3239 // CHECK4-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
3240 // CHECK4-NEXT:    store i8* null, i8** [[TMP40]], align 4
3241 // CHECK4-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
3242 // CHECK4-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
3243 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
3244 // CHECK4-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3245 // CHECK4-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
3246 // CHECK4-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
3247 // CHECK4:       omp_offload.failed28:
3248 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
3249 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
3250 // CHECK4:       omp_offload.cont29:
3251 // CHECK4-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
3252 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
3253 // CHECK4-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
3254 // CHECK4-NEXT:    ret i32 [[TMP45]]
3255 //
3256 //
3257 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
3258 // CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
3259 // CHECK4-NEXT:  entry:
3260 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3261 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3262 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3263 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
3264 // CHECK4-NEXT:    ret void
3265 //
3266 //
3267 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
3268 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3269 // CHECK4-NEXT:  entry:
3270 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3271 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3272 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3273 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3274 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3275 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3276 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3277 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3278 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3279 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3280 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3281 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3282 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3283 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3284 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3285 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
3286 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3287 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3288 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3289 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3290 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3291 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3292 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3293 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3294 // CHECK4:       cond.true:
3295 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3296 // CHECK4:       cond.false:
3297 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3298 // CHECK4-NEXT:    br label [[COND_END]]
3299 // CHECK4:       cond.end:
3300 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3301 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3302 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3303 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3304 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3305 // CHECK4:       omp.inner.for.cond:
3306 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
3307 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
3308 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3309 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3310 // CHECK4:       omp.inner.for.body:
3311 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !9
3312 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
3313 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !9
3314 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3315 // CHECK4:       omp.inner.for.inc:
3316 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
3317 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !9
3318 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3319 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
3320 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
3321 // CHECK4:       omp.inner.for.end:
3322 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3323 // CHECK4:       omp.loop.exit:
3324 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3325 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3326 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3327 // CHECK4-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3328 // CHECK4:       .omp.final.then:
3329 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3330 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3331 // CHECK4:       .omp.final.done:
3332 // CHECK4-NEXT:    ret void
3333 //
3334 //
3335 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
3336 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3337 // CHECK4-NEXT:  entry:
3338 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3339 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3340 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3341 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3342 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3343 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3344 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3345 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3346 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3347 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3348 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3349 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3350 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3351 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3352 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3353 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3354 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3355 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3356 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3357 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
3358 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3359 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3360 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
3361 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
3362 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3363 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3364 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3365 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
3366 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3367 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3368 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
3369 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3370 // CHECK4:       cond.true:
3371 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3372 // CHECK4:       cond.false:
3373 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3374 // CHECK4-NEXT:    br label [[COND_END]]
3375 // CHECK4:       cond.end:
3376 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
3377 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3378 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3379 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
3380 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3381 // CHECK4:       omp.inner.for.cond:
3382 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
3383 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
3384 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
3385 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3386 // CHECK4:       omp.inner.for.body:
3387 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
3388 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
3389 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3390 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
3391 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
3392 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
3393 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
3394 // CHECK4-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
3395 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3396 // CHECK4:       omp.body.continue:
3397 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3398 // CHECK4:       omp.inner.for.inc:
3399 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
3400 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
3401 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
3402 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
3403 // CHECK4:       omp.inner.for.end:
3404 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3405 // CHECK4:       omp.loop.exit:
3406 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
3407 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3408 // CHECK4-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3409 // CHECK4-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3410 // CHECK4:       .omp.final.then:
3411 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3412 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3413 // CHECK4:       .omp.final.done:
3414 // CHECK4-NEXT:    ret void
3415 //
3416 //
3417 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
3418 // CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3419 // CHECK4-NEXT:  entry:
3420 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3421 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3422 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3423 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
3424 // CHECK4-NEXT:    ret void
3425 //
3426 //
3427 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
3428 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3429 // CHECK4-NEXT:  entry:
3430 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3431 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3432 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3433 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3434 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3435 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3436 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3437 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3438 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3439 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3440 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3441 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3442 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3443 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3444 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3445 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
3446 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3447 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3448 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3449 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3450 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3451 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3452 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3453 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3454 // CHECK4:       cond.true:
3455 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3456 // CHECK4:       cond.false:
3457 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3458 // CHECK4-NEXT:    br label [[COND_END]]
3459 // CHECK4:       cond.end:
3460 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3461 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3462 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3463 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3464 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3465 // CHECK4:       omp.inner.for.cond:
3466 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
3467 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
3468 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3469 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3470 // CHECK4:       omp.inner.for.body:
3471 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
3472 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
3473 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !18
3474 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3475 // CHECK4:       omp.inner.for.inc:
3476 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
3477 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
3478 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3479 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
3480 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
3481 // CHECK4:       omp.inner.for.end:
3482 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3483 // CHECK4:       omp.loop.exit:
3484 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3485 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3486 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3487 // CHECK4-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3488 // CHECK4:       .omp.final.then:
3489 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3490 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3491 // CHECK4:       .omp.final.done:
3492 // CHECK4-NEXT:    ret void
3493 //
3494 //
3495 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
3496 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3497 // CHECK4-NEXT:  entry:
3498 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3499 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3500 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3501 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3502 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3503 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3504 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3505 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3506 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3507 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3508 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3509 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3510 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3511 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3512 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3513 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3514 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3515 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3516 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3517 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
3518 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3519 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3520 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
3521 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
3522 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3523 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3524 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3525 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
3526 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3527 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3528 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
3529 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3530 // CHECK4:       cond.true:
3531 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3532 // CHECK4:       cond.false:
3533 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3534 // CHECK4-NEXT:    br label [[COND_END]]
3535 // CHECK4:       cond.end:
3536 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
3537 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3538 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3539 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
3540 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3541 // CHECK4:       omp.inner.for.cond:
3542 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
3543 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
3544 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
3545 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3546 // CHECK4:       omp.inner.for.body:
3547 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
3548 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
3549 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3550 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
3551 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
3552 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
3553 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
3554 // CHECK4-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
3555 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3556 // CHECK4:       omp.body.continue:
3557 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3558 // CHECK4:       omp.inner.for.inc:
3559 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
3560 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
3561 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
3562 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
3563 // CHECK4:       omp.inner.for.end:
3564 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3565 // CHECK4:       omp.loop.exit:
3566 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
3567 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3568 // CHECK4-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3569 // CHECK4-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3570 // CHECK4:       .omp.final.then:
3571 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3572 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3573 // CHECK4:       .omp.final.done:
3574 // CHECK4-NEXT:    ret void
3575 //
3576 //
3577 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
3578 // CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3579 // CHECK4-NEXT:  entry:
3580 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3581 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3582 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3583 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
3584 // CHECK4-NEXT:    ret void
3585 //
3586 //
3587 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..6
3588 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3589 // CHECK4-NEXT:  entry:
3590 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3591 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3592 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3593 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3594 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3595 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3596 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3597 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3598 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3599 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3600 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3601 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3602 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3603 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3604 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3605 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
3606 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3607 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3608 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3609 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3610 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3611 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3612 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3613 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3614 // CHECK4:       cond.true:
3615 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3616 // CHECK4:       cond.false:
3617 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3618 // CHECK4-NEXT:    br label [[COND_END]]
3619 // CHECK4:       cond.end:
3620 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3621 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3622 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3623 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3624 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3625 // CHECK4:       omp.inner.for.cond:
3626 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
3627 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
3628 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3629 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3630 // CHECK4:       omp.inner.for.body:
3631 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24
3632 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
3633 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !24
3634 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3635 // CHECK4:       omp.inner.for.inc:
3636 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
3637 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24
3638 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3639 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
3640 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
3641 // CHECK4:       omp.inner.for.end:
3642 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3643 // CHECK4:       omp.loop.exit:
3644 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3645 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3646 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3647 // CHECK4-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3648 // CHECK4:       .omp.final.then:
3649 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3650 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3651 // CHECK4:       .omp.final.done:
3652 // CHECK4-NEXT:    ret void
3653 //
3654 //
3655 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
3656 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3657 // CHECK4-NEXT:  entry:
3658 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3659 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3660 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3661 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3662 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3663 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3664 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3665 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3666 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3667 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3668 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3669 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3670 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3671 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3672 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3673 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3674 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3675 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3676 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3677 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
3678 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3679 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3680 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
3681 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
3682 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3683 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3684 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3685 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
3686 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
3687 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
3688 // CHECK4:       omp.dispatch.cond:
3689 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3690 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3691 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
3692 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3693 // CHECK4:       cond.true:
3694 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3695 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3696 // CHECK4:       cond.false:
3697 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3698 // CHECK4-NEXT:    br label [[COND_END]]
3699 // CHECK4:       cond.end:
3700 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
3701 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3702 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3703 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
3704 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3705 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3706 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
3707 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3708 // CHECK4:       omp.dispatch.body:
3709 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3710 // CHECK4:       omp.inner.for.cond:
3711 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3712 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
3713 // CHECK4-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
3714 // CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3715 // CHECK4:       omp.inner.for.body:
3716 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3717 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
3718 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3719 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
3720 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
3721 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !27
3722 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
3723 // CHECK4-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
3724 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3725 // CHECK4:       omp.body.continue:
3726 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3727 // CHECK4:       omp.inner.for.inc:
3728 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3729 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
3730 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3731 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
3732 // CHECK4:       omp.inner.for.end:
3733 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
3734 // CHECK4:       omp.dispatch.inc:
3735 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3736 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3737 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
3738 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
3739 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3740 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3741 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
3742 // CHECK4-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
3743 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
3744 // CHECK4:       omp.dispatch.end:
3745 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
3746 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3747 // CHECK4-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
3748 // CHECK4-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3749 // CHECK4:       .omp.final.then:
3750 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3751 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3752 // CHECK4:       .omp.final.done:
3753 // CHECK4-NEXT:    ret void
3754 //
3755 //
3756 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
3757 // CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3758 // CHECK4-NEXT:  entry:
3759 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3760 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3761 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3762 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
3763 // CHECK4-NEXT:    ret void
3764 //
3765 //
3766 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10
3767 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3768 // CHECK4-NEXT:  entry:
3769 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3770 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3771 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3772 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3773 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3774 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3775 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3776 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3777 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3778 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3779 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3780 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3781 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3782 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3783 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3784 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
3785 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3786 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3787 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3788 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3789 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3790 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3791 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3792 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3793 // CHECK4:       cond.true:
3794 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3795 // CHECK4:       cond.false:
3796 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3797 // CHECK4-NEXT:    br label [[COND_END]]
3798 // CHECK4:       cond.end:
3799 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3800 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3801 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3802 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3803 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3804 // CHECK4:       omp.inner.for.cond:
3805 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
3806 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
3807 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3808 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3809 // CHECK4:       omp.inner.for.body:
3810 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30
3811 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
3812 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !30
3813 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3814 // CHECK4:       omp.inner.for.inc:
3815 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
3816 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30
3817 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3818 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
3819 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
3820 // CHECK4:       omp.inner.for.end:
3821 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3822 // CHECK4:       omp.loop.exit:
3823 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3824 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3825 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3826 // CHECK4-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3827 // CHECK4:       .omp.final.then:
3828 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3829 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3830 // CHECK4:       .omp.final.done:
3831 // CHECK4-NEXT:    ret void
3832 //
3833 //
3834 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11
3835 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3836 // CHECK4-NEXT:  entry:
3837 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3838 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3839 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3840 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3841 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3842 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3843 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3844 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3845 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3846 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3847 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3848 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3849 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3850 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3851 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3852 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3853 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3854 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3855 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3856 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
3857 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3858 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3859 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
3860 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
3861 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3862 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3863 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3864 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3865 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3866 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
3867 // CHECK4-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
3868 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
3869 // CHECK4:       omp.dispatch.cond:
3870 // CHECK4-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
3871 // CHECK4-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
3872 // CHECK4-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3873 // CHECK4:       omp.dispatch.body:
3874 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3875 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
3876 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3877 // CHECK4:       omp.inner.for.cond:
3878 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
3879 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
3880 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3881 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3882 // CHECK4:       omp.inner.for.body:
3883 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
3884 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
3885 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3886 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
3887 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
3888 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !33
3889 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
3890 // CHECK4-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
3891 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3892 // CHECK4:       omp.body.continue:
3893 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3894 // CHECK4:       omp.inner.for.inc:
3895 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
3896 // CHECK4-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
3897 // CHECK4-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
3898 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
3899 // CHECK4:       omp.inner.for.end:
3900 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
3901 // CHECK4:       omp.dispatch.inc:
3902 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
3903 // CHECK4:       omp.dispatch.end:
3904 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3905 // CHECK4-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
3906 // CHECK4-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3907 // CHECK4:       .omp.final.then:
3908 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3909 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3910 // CHECK4:       .omp.final.done:
3911 // CHECK4-NEXT:    ret void
3912 //
3913 //
3914 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
3915 // CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3916 // CHECK4-NEXT:  entry:
3917 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3918 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3919 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3920 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
3921 // CHECK4-NEXT:    ret void
3922 //
3923 //
3924 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14
3925 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3926 // CHECK4-NEXT:  entry:
3927 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3928 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3929 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
3930 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3931 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3932 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3933 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3934 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3935 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3936 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
3937 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3938 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3939 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
3940 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
3941 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3942 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
3943 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3944 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3945 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3946 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3947 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3948 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3949 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3950 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3951 // CHECK4:       cond.true:
3952 // CHECK4-NEXT:    br label [[COND_END:%.*]]
3953 // CHECK4:       cond.false:
3954 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3955 // CHECK4-NEXT:    br label [[COND_END]]
3956 // CHECK4:       cond.end:
3957 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3958 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3959 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3960 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3961 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3962 // CHECK4:       omp.inner.for.cond:
3963 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
3964 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
3965 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3966 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3967 // CHECK4:       omp.inner.for.body:
3968 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !36
3969 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
3970 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !36
3971 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3972 // CHECK4:       omp.inner.for.inc:
3973 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
3974 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !36
3975 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3976 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
3977 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
3978 // CHECK4:       omp.inner.for.end:
3979 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3980 // CHECK4:       omp.loop.exit:
3981 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3982 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3983 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3984 // CHECK4-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3985 // CHECK4:       .omp.final.then:
3986 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
3987 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3988 // CHECK4:       .omp.final.done:
3989 // CHECK4-NEXT:    ret void
3990 //
3991 //
3992 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15
3993 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
3994 // CHECK4-NEXT:  entry:
3995 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3996 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3997 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3998 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3999 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
4000 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4001 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4002 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4003 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4004 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4005 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4006 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
4007 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4008 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4009 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4010 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4011 // CHECK4-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
4012 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
4013 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4014 // CHECK4-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
4015 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4016 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4017 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
4018 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
4019 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4020 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4021 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4022 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4023 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4024 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
4025 // CHECK4-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
4026 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4027 // CHECK4:       omp.dispatch.cond:
4028 // CHECK4-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4029 // CHECK4-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
4030 // CHECK4-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4031 // CHECK4:       omp.dispatch.body:
4032 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4033 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
4034 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4035 // CHECK4:       omp.inner.for.cond:
4036 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
4037 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
4038 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4039 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4040 // CHECK4:       omp.inner.for.body:
4041 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
4042 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
4043 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4044 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
4045 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
4046 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !39
4047 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
4048 // CHECK4-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !39
4049 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4050 // CHECK4:       omp.body.continue:
4051 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4052 // CHECK4:       omp.inner.for.inc:
4053 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
4054 // CHECK4-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
4055 // CHECK4-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
4056 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
4057 // CHECK4:       omp.inner.for.end:
4058 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4059 // CHECK4:       omp.dispatch.inc:
4060 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
4061 // CHECK4:       omp.dispatch.end:
4062 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4063 // CHECK4-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4064 // CHECK4-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4065 // CHECK4:       .omp.final.then:
4066 // CHECK4-NEXT:    store i32 123, i32* [[I]], align 4
4067 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4068 // CHECK4:       .omp.final.done:
4069 // CHECK4-NEXT:    ret void
4070 //
4071 //
4072 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
4073 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] {
4074 // CHECK4-NEXT:  entry:
4075 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
4076 // CHECK4-NEXT:    ret void
4077 //
4078 //
4079 // CHECK5-LABEL: define {{[^@]+}}@_Z21teams_template_structv
4080 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
4081 // CHECK5-NEXT:  entry:
4082 // CHECK5-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
4083 // CHECK5-NEXT:    [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
4084 // CHECK5-NEXT:    ret i32 [[CALL]]
4085 //
4086 //
4087 // CHECK5-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
4088 // CHECK5-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
4089 // CHECK5-NEXT:  entry:
4090 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4091 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
4092 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
4093 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
4094 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4095 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
4096 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
4097 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
4098 // CHECK5-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
4099 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
4100 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
4101 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
4102 // CHECK5-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
4103 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
4104 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
4105 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
4106 // CHECK5-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
4107 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
4108 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
4109 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
4110 // CHECK5-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
4111 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4112 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4113 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
4114 // CHECK5-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4115 // CHECK5-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
4116 // CHECK5-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
4117 // CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4118 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
4119 // CHECK5-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
4120 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
4121 // CHECK5-NEXT:    store i8* null, i8** [[TMP4]], align 8
4122 // CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4123 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4124 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
4125 // CHECK5-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
4126 // CHECK5-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
4127 // CHECK5-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4128 // CHECK5:       omp_offload.failed:
4129 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
4130 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
4131 // CHECK5:       omp_offload.cont:
4132 // CHECK5-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
4133 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
4134 // CHECK5-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
4135 // CHECK5-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
4136 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
4137 // CHECK5-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
4138 // CHECK5-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
4139 // CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
4140 // CHECK5-NEXT:    store i8* null, i8** [[TMP13]], align 8
4141 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
4142 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
4143 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
4144 // CHECK5-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
4145 // CHECK5-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
4146 // CHECK5-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
4147 // CHECK5:       omp_offload.failed7:
4148 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
4149 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
4150 // CHECK5:       omp_offload.cont8:
4151 // CHECK5-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
4152 // CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
4153 // CHECK5-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
4154 // CHECK5-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
4155 // CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
4156 // CHECK5-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
4157 // CHECK5-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
4158 // CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
4159 // CHECK5-NEXT:    store i8* null, i8** [[TMP22]], align 8
4160 // CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
4161 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
4162 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
4163 // CHECK5-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
4164 // CHECK5-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
4165 // CHECK5-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
4166 // CHECK5:       omp_offload.failed14:
4167 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
4168 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
4169 // CHECK5:       omp_offload.cont15:
4170 // CHECK5-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
4171 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
4172 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
4173 // CHECK5-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
4174 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
4175 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
4176 // CHECK5-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
4177 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
4178 // CHECK5-NEXT:    store i8* null, i8** [[TMP31]], align 8
4179 // CHECK5-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
4180 // CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
4181 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
4182 // CHECK5-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
4183 // CHECK5-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
4184 // CHECK5-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
4185 // CHECK5:       omp_offload.failed21:
4186 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
4187 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
4188 // CHECK5:       omp_offload.cont22:
4189 // CHECK5-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
4190 // CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
4191 // CHECK5-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
4192 // CHECK5-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
4193 // CHECK5-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
4194 // CHECK5-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
4195 // CHECK5-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
4196 // CHECK5-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
4197 // CHECK5-NEXT:    store i8* null, i8** [[TMP40]], align 8
4198 // CHECK5-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
4199 // CHECK5-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
4200 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
4201 // CHECK5-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
4202 // CHECK5-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
4203 // CHECK5-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
4204 // CHECK5:       omp_offload.failed28:
4205 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
4206 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
4207 // CHECK5:       omp_offload.cont29:
4208 // CHECK5-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
4209 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
4210 // CHECK5-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
4211 // CHECK5-NEXT:    ret i32 [[TMP45]]
4212 //
4213 //
4214 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
4215 // CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
4216 // CHECK5-NEXT:  entry:
4217 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4218 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4219 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4220 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
4221 // CHECK5-NEXT:    ret void
4222 //
4223 //
4224 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
4225 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4226 // CHECK5-NEXT:  entry:
4227 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4228 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4229 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4230 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4231 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4232 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4233 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4234 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4235 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4236 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4237 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4238 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4239 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4240 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4241 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4242 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
4243 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4244 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4245 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4246 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
4247 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4248 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4249 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4250 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4251 // CHECK5:       cond.true:
4252 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4253 // CHECK5:       cond.false:
4254 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4255 // CHECK5-NEXT:    br label [[COND_END]]
4256 // CHECK5:       cond.end:
4257 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4258 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4259 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4260 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
4261 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4262 // CHECK5:       omp.inner.for.cond:
4263 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
4264 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
4265 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4266 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4267 // CHECK5:       omp.inner.for.body:
4268 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !8
4269 // CHECK5-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
4270 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
4271 // CHECK5-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
4272 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !8
4273 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4274 // CHECK5:       omp.inner.for.inc:
4275 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
4276 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !8
4277 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4278 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
4279 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
4280 // CHECK5:       omp.inner.for.end:
4281 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4282 // CHECK5:       omp.loop.exit:
4283 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
4284 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4285 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4286 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4287 // CHECK5:       .omp.final.then:
4288 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4289 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4290 // CHECK5:       .omp.final.done:
4291 // CHECK5-NEXT:    ret void
4292 //
4293 //
4294 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
4295 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4296 // CHECK5-NEXT:  entry:
4297 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4298 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4299 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4300 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4301 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4302 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4303 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4304 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4305 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4306 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4307 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4308 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4309 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4310 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4311 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4312 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4313 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4314 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4315 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4316 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
4317 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4318 // CHECK5-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
4319 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4320 // CHECK5-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
4321 // CHECK5-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
4322 // CHECK5-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
4323 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4324 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4325 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4326 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
4327 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4328 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4329 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
4330 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4331 // CHECK5:       cond.true:
4332 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4333 // CHECK5:       cond.false:
4334 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4335 // CHECK5-NEXT:    br label [[COND_END]]
4336 // CHECK5:       cond.end:
4337 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
4338 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4339 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4340 // CHECK5-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
4341 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4342 // CHECK5:       omp.inner.for.cond:
4343 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
4344 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
4345 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
4346 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4347 // CHECK5:       omp.inner.for.body:
4348 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
4349 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
4350 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4351 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
4352 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
4353 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
4354 // CHECK5-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
4355 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
4356 // CHECK5-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12
4357 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4358 // CHECK5:       omp.body.continue:
4359 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4360 // CHECK5:       omp.inner.for.inc:
4361 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
4362 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
4363 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
4364 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
4365 // CHECK5:       omp.inner.for.end:
4366 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4367 // CHECK5:       omp.loop.exit:
4368 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
4369 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4370 // CHECK5-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
4371 // CHECK5-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4372 // CHECK5:       .omp.final.then:
4373 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4374 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4375 // CHECK5:       .omp.final.done:
4376 // CHECK5-NEXT:    ret void
4377 //
4378 //
4379 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
4380 // CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4381 // CHECK5-NEXT:  entry:
4382 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4383 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4384 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4385 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
4386 // CHECK5-NEXT:    ret void
4387 //
4388 //
4389 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
4390 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4391 // CHECK5-NEXT:  entry:
4392 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4393 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4394 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4395 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4396 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4397 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4398 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4399 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4400 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4401 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4402 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4403 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4404 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4405 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4406 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4407 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
4408 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4409 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4410 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4411 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
4412 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4413 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4414 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4415 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4416 // CHECK5:       cond.true:
4417 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4418 // CHECK5:       cond.false:
4419 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4420 // CHECK5-NEXT:    br label [[COND_END]]
4421 // CHECK5:       cond.end:
4422 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4423 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4424 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4425 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
4426 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4427 // CHECK5:       omp.inner.for.cond:
4428 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
4429 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
4430 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4431 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4432 // CHECK5:       omp.inner.for.body:
4433 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
4434 // CHECK5-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
4435 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
4436 // CHECK5-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
4437 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !17
4438 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4439 // CHECK5:       omp.inner.for.inc:
4440 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
4441 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
4442 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4443 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
4444 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
4445 // CHECK5:       omp.inner.for.end:
4446 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4447 // CHECK5:       omp.loop.exit:
4448 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
4449 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4450 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4451 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4452 // CHECK5:       .omp.final.then:
4453 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4454 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4455 // CHECK5:       .omp.final.done:
4456 // CHECK5-NEXT:    ret void
4457 //
4458 //
4459 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
4460 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4461 // CHECK5-NEXT:  entry:
4462 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4463 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4464 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4465 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4466 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4467 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4468 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4469 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4470 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4471 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4472 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4473 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4474 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4475 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4476 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4477 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4478 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4479 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4480 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4481 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
4482 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4483 // CHECK5-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
4484 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4485 // CHECK5-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
4486 // CHECK5-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
4487 // CHECK5-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
4488 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4489 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4490 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4491 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
4492 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4493 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4494 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
4495 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4496 // CHECK5:       cond.true:
4497 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4498 // CHECK5:       cond.false:
4499 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4500 // CHECK5-NEXT:    br label [[COND_END]]
4501 // CHECK5:       cond.end:
4502 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
4503 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4504 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4505 // CHECK5-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
4506 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4507 // CHECK5:       omp.inner.for.cond:
4508 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
4509 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
4510 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
4511 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4512 // CHECK5:       omp.inner.for.body:
4513 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
4514 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
4515 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4516 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !20
4517 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
4518 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !20
4519 // CHECK5-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
4520 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
4521 // CHECK5-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !20
4522 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4523 // CHECK5:       omp.body.continue:
4524 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4525 // CHECK5:       omp.inner.for.inc:
4526 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
4527 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
4528 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
4529 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
4530 // CHECK5:       omp.inner.for.end:
4531 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4532 // CHECK5:       omp.loop.exit:
4533 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
4534 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4535 // CHECK5-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
4536 // CHECK5-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4537 // CHECK5:       .omp.final.then:
4538 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4539 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4540 // CHECK5:       .omp.final.done:
4541 // CHECK5-NEXT:    ret void
4542 //
4543 //
4544 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
4545 // CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4546 // CHECK5-NEXT:  entry:
4547 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4548 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4549 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4550 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
4551 // CHECK5-NEXT:    ret void
4552 //
4553 //
4554 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6
4555 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4556 // CHECK5-NEXT:  entry:
4557 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4558 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4559 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4560 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4561 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4562 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4563 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4564 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4565 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4566 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4567 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4568 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4569 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4570 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4571 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4572 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
4573 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4574 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4575 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4576 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
4577 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4578 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4579 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4580 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4581 // CHECK5:       cond.true:
4582 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4583 // CHECK5:       cond.false:
4584 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4585 // CHECK5-NEXT:    br label [[COND_END]]
4586 // CHECK5:       cond.end:
4587 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4588 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4589 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4590 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
4591 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4592 // CHECK5:       omp.inner.for.cond:
4593 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
4594 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
4595 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4596 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4597 // CHECK5:       omp.inner.for.body:
4598 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
4599 // CHECK5-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
4600 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
4601 // CHECK5-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
4602 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !23
4603 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4604 // CHECK5:       omp.inner.for.inc:
4605 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
4606 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
4607 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4608 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
4609 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
4610 // CHECK5:       omp.inner.for.end:
4611 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4612 // CHECK5:       omp.loop.exit:
4613 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
4614 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4615 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4616 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4617 // CHECK5:       .omp.final.then:
4618 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4619 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4620 // CHECK5:       .omp.final.done:
4621 // CHECK5-NEXT:    ret void
4622 //
4623 //
4624 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..7
4625 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4626 // CHECK5-NEXT:  entry:
4627 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4628 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4629 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4630 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4631 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4632 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4633 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4634 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4635 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4636 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4637 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4638 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4639 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4640 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4641 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4642 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4643 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4644 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4645 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4646 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
4647 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4648 // CHECK5-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
4649 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4650 // CHECK5-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
4651 // CHECK5-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
4652 // CHECK5-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
4653 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4654 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4655 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4656 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
4657 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
4658 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4659 // CHECK5:       omp.dispatch.cond:
4660 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4661 // CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4662 // CHECK5-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
4663 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
4664 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4665 // CHECK5:       cond.true:
4666 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4667 // CHECK5-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
4668 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4669 // CHECK5:       cond.false:
4670 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4671 // CHECK5-NEXT:    br label [[COND_END]]
4672 // CHECK5:       cond.end:
4673 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
4674 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4675 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4676 // CHECK5-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
4677 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4678 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4679 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
4680 // CHECK5-NEXT:    br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4681 // CHECK5:       omp.dispatch.body:
4682 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4683 // CHECK5:       omp.inner.for.cond:
4684 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
4685 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
4686 // CHECK5-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
4687 // CHECK5-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4688 // CHECK5:       omp.inner.for.body:
4689 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
4690 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
4691 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4692 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !26
4693 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
4694 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !26
4695 // CHECK5-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
4696 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
4697 // CHECK5-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
4698 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4699 // CHECK5:       omp.body.continue:
4700 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4701 // CHECK5:       omp.inner.for.inc:
4702 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
4703 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
4704 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
4705 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
4706 // CHECK5:       omp.inner.for.end:
4707 // CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4708 // CHECK5:       omp.dispatch.inc:
4709 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4710 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4711 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
4712 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
4713 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4714 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4715 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
4716 // CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
4717 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
4718 // CHECK5:       omp.dispatch.end:
4719 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
4720 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4721 // CHECK5-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
4722 // CHECK5-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4723 // CHECK5:       .omp.final.then:
4724 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4725 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4726 // CHECK5:       .omp.final.done:
4727 // CHECK5-NEXT:    ret void
4728 //
4729 //
4730 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
4731 // CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4732 // CHECK5-NEXT:  entry:
4733 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4734 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4735 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4736 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
4737 // CHECK5-NEXT:    ret void
4738 //
4739 //
4740 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10
4741 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4742 // CHECK5-NEXT:  entry:
4743 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4744 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4745 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4746 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4747 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4748 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4749 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4750 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4751 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4752 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4753 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4754 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4755 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4756 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4757 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4758 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
4759 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4760 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4761 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4762 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
4763 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4764 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4765 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4766 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4767 // CHECK5:       cond.true:
4768 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4769 // CHECK5:       cond.false:
4770 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4771 // CHECK5-NEXT:    br label [[COND_END]]
4772 // CHECK5:       cond.end:
4773 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4774 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4775 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4776 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
4777 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4778 // CHECK5:       omp.inner.for.cond:
4779 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
4780 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
4781 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4782 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4783 // CHECK5:       omp.inner.for.body:
4784 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
4785 // CHECK5-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
4786 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
4787 // CHECK5-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
4788 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !29
4789 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4790 // CHECK5:       omp.inner.for.inc:
4791 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
4792 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
4793 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4794 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
4795 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
4796 // CHECK5:       omp.inner.for.end:
4797 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4798 // CHECK5:       omp.loop.exit:
4799 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
4800 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4801 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4802 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4803 // CHECK5:       .omp.final.then:
4804 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4805 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4806 // CHECK5:       .omp.final.done:
4807 // CHECK5-NEXT:    ret void
4808 //
4809 //
4810 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..11
4811 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4812 // CHECK5-NEXT:  entry:
4813 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4814 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4815 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4816 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4817 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4818 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4819 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4820 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4821 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4822 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4823 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4824 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4825 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4826 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4827 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4828 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4829 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4830 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4831 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4832 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
4833 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4834 // CHECK5-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
4835 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4836 // CHECK5-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
4837 // CHECK5-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
4838 // CHECK5-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
4839 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4840 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4841 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4842 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4843 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4844 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
4845 // CHECK5-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
4846 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4847 // CHECK5:       omp.dispatch.cond:
4848 // CHECK5-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4849 // CHECK5-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
4850 // CHECK5-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4851 // CHECK5:       omp.dispatch.body:
4852 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4853 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
4854 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4855 // CHECK5:       omp.inner.for.cond:
4856 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
4857 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
4858 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4859 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4860 // CHECK5:       omp.inner.for.body:
4861 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
4862 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
4863 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4864 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !32
4865 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
4866 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !32
4867 // CHECK5-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
4868 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
4869 // CHECK5-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
4870 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4871 // CHECK5:       omp.body.continue:
4872 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4873 // CHECK5:       omp.inner.for.inc:
4874 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
4875 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
4876 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
4877 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
4878 // CHECK5:       omp.inner.for.end:
4879 // CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4880 // CHECK5:       omp.dispatch.inc:
4881 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
4882 // CHECK5:       omp.dispatch.end:
4883 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4884 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4885 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4886 // CHECK5:       .omp.final.then:
4887 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4888 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4889 // CHECK5:       .omp.final.done:
4890 // CHECK5-NEXT:    ret void
4891 //
4892 //
4893 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
4894 // CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4895 // CHECK5-NEXT:  entry:
4896 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4897 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4898 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4899 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
4900 // CHECK5-NEXT:    ret void
4901 //
4902 //
4903 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..14
4904 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4905 // CHECK5-NEXT:  entry:
4906 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4907 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4908 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4909 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4910 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4911 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4912 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4913 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4914 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4915 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4916 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4917 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4918 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4919 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4920 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4921 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
4922 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4923 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4924 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4925 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
4926 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4927 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4928 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4929 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4930 // CHECK5:       cond.true:
4931 // CHECK5-NEXT:    br label [[COND_END:%.*]]
4932 // CHECK5:       cond.false:
4933 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4934 // CHECK5-NEXT:    br label [[COND_END]]
4935 // CHECK5:       cond.end:
4936 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4937 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4938 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4939 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
4940 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4941 // CHECK5:       omp.inner.for.cond:
4942 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
4943 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
4944 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4945 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4946 // CHECK5:       omp.inner.for.body:
4947 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
4948 // CHECK5-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
4949 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
4950 // CHECK5-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
4951 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !35
4952 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4953 // CHECK5:       omp.inner.for.inc:
4954 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
4955 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
4956 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4957 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
4958 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
4959 // CHECK5:       omp.inner.for.end:
4960 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4961 // CHECK5:       omp.loop.exit:
4962 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
4963 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4964 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4965 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4966 // CHECK5:       .omp.final.then:
4967 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
4968 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4969 // CHECK5:       .omp.final.done:
4970 // CHECK5-NEXT:    ret void
4971 //
4972 //
4973 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..15
4974 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
4975 // CHECK5-NEXT:  entry:
4976 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4977 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4978 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4979 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4980 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
4981 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4982 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4983 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4984 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4985 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4986 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4987 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
4988 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4989 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4990 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4991 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4992 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
4993 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
4994 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4995 // CHECK5-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
4996 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4997 // CHECK5-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
4998 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4999 // CHECK5-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
5000 // CHECK5-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5001 // CHECK5-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
5002 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5003 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5004 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5005 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5006 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5007 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
5008 // CHECK5-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
5009 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5010 // CHECK5:       omp.dispatch.cond:
5011 // CHECK5-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
5012 // CHECK5-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
5013 // CHECK5-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5014 // CHECK5:       omp.dispatch.body:
5015 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5016 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
5017 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5018 // CHECK5:       omp.inner.for.cond:
5019 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5020 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
5021 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
5022 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5023 // CHECK5:       omp.inner.for.body:
5024 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5025 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
5026 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5027 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !38
5028 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
5029 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !38
5030 // CHECK5-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
5031 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
5032 // CHECK5-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
5033 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5034 // CHECK5:       omp.body.continue:
5035 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5036 // CHECK5:       omp.inner.for.inc:
5037 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5038 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
5039 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5040 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
5041 // CHECK5:       omp.inner.for.end:
5042 // CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
5043 // CHECK5:       omp.dispatch.inc:
5044 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
5045 // CHECK5:       omp.dispatch.end:
5046 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5047 // CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
5048 // CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5049 // CHECK5:       .omp.final.then:
5050 // CHECK5-NEXT:    store i32 123, i32* [[I]], align 4
5051 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5052 // CHECK5:       .omp.final.done:
5053 // CHECK5-NEXT:    ret void
5054 //
5055 //
5056 // CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
5057 // CHECK5-SAME: () #[[ATTR3:[0-9]+]] {
5058 // CHECK5-NEXT:  entry:
5059 // CHECK5-NEXT:    call void @__tgt_register_requires(i64 1)
5060 // CHECK5-NEXT:    ret void
5061 //
5062 //
5063 // CHECK6-LABEL: define {{[^@]+}}@_Z21teams_template_structv
5064 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
5065 // CHECK6-NEXT:  entry:
5066 // CHECK6-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
5067 // CHECK6-NEXT:    [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
5068 // CHECK6-NEXT:    ret i32 [[CALL]]
5069 //
5070 //
5071 // CHECK6-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
5072 // CHECK6-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
5073 // CHECK6-NEXT:  entry:
5074 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5075 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
5076 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
5077 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
5078 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5079 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
5080 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
5081 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
5082 // CHECK6-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
5083 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
5084 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
5085 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
5086 // CHECK6-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
5087 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
5088 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
5089 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
5090 // CHECK6-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
5091 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
5092 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
5093 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
5094 // CHECK6-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
5095 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5096 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5097 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
5098 // CHECK6-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5099 // CHECK6-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
5100 // CHECK6-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
5101 // CHECK6-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5102 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
5103 // CHECK6-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
5104 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
5105 // CHECK6-NEXT:    store i8* null, i8** [[TMP4]], align 8
5106 // CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5107 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5108 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
5109 // CHECK6-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
5110 // CHECK6-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
5111 // CHECK6-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5112 // CHECK6:       omp_offload.failed:
5113 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
5114 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
5115 // CHECK6:       omp_offload.cont:
5116 // CHECK6-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
5117 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
5118 // CHECK6-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
5119 // CHECK6-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
5120 // CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
5121 // CHECK6-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
5122 // CHECK6-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
5123 // CHECK6-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
5124 // CHECK6-NEXT:    store i8* null, i8** [[TMP13]], align 8
5125 // CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
5126 // CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
5127 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
5128 // CHECK6-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
5129 // CHECK6-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
5130 // CHECK6-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
5131 // CHECK6:       omp_offload.failed7:
5132 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
5133 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
5134 // CHECK6:       omp_offload.cont8:
5135 // CHECK6-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
5136 // CHECK6-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
5137 // CHECK6-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
5138 // CHECK6-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
5139 // CHECK6-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
5140 // CHECK6-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
5141 // CHECK6-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
5142 // CHECK6-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
5143 // CHECK6-NEXT:    store i8* null, i8** [[TMP22]], align 8
5144 // CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
5145 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
5146 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
5147 // CHECK6-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
5148 // CHECK6-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
5149 // CHECK6-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
5150 // CHECK6:       omp_offload.failed14:
5151 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
5152 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
5153 // CHECK6:       omp_offload.cont15:
5154 // CHECK6-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
5155 // CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
5156 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
5157 // CHECK6-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
5158 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
5159 // CHECK6-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
5160 // CHECK6-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
5161 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
5162 // CHECK6-NEXT:    store i8* null, i8** [[TMP31]], align 8
5163 // CHECK6-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
5164 // CHECK6-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
5165 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
5166 // CHECK6-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
5167 // CHECK6-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
5168 // CHECK6-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
5169 // CHECK6:       omp_offload.failed21:
5170 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
5171 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
5172 // CHECK6:       omp_offload.cont22:
5173 // CHECK6-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
5174 // CHECK6-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
5175 // CHECK6-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
5176 // CHECK6-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
5177 // CHECK6-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
5178 // CHECK6-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
5179 // CHECK6-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
5180 // CHECK6-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
5181 // CHECK6-NEXT:    store i8* null, i8** [[TMP40]], align 8
5182 // CHECK6-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
5183 // CHECK6-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
5184 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
5185 // CHECK6-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
5186 // CHECK6-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
5187 // CHECK6-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
5188 // CHECK6:       omp_offload.failed28:
5189 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
5190 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
5191 // CHECK6:       omp_offload.cont29:
5192 // CHECK6-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
5193 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
5194 // CHECK6-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
5195 // CHECK6-NEXT:    ret i32 [[TMP45]]
5196 //
5197 //
5198 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
5199 // CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
5200 // CHECK6-NEXT:  entry:
5201 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5202 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5203 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5204 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
5205 // CHECK6-NEXT:    ret void
5206 //
5207 //
5208 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
5209 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5210 // CHECK6-NEXT:  entry:
5211 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5212 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5213 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5214 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5215 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5216 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5217 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5218 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5219 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5220 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5221 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5222 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5223 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5224 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5225 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5226 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
5227 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5228 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5229 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5230 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5231 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5232 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5233 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
5234 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5235 // CHECK6:       cond.true:
5236 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5237 // CHECK6:       cond.false:
5238 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5239 // CHECK6-NEXT:    br label [[COND_END]]
5240 // CHECK6:       cond.end:
5241 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5242 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5243 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5244 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5245 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5246 // CHECK6:       omp.inner.for.cond:
5247 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
5248 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
5249 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5250 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5251 // CHECK6:       omp.inner.for.body:
5252 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !8
5253 // CHECK6-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
5254 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !8
5255 // CHECK6-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
5256 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !8
5257 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5258 // CHECK6:       omp.inner.for.inc:
5259 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
5260 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !8
5261 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5262 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !8
5263 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
5264 // CHECK6:       omp.inner.for.end:
5265 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5266 // CHECK6:       omp.loop.exit:
5267 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5268 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5269 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
5270 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5271 // CHECK6:       .omp.final.then:
5272 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5273 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5274 // CHECK6:       .omp.final.done:
5275 // CHECK6-NEXT:    ret void
5276 //
5277 //
5278 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..1
5279 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5280 // CHECK6-NEXT:  entry:
5281 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5282 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5283 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5284 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5285 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5286 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5287 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5288 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5289 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5290 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5291 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5292 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5293 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5294 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5295 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5296 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5297 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5298 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5299 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5300 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
5301 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5302 // CHECK6-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
5303 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5304 // CHECK6-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
5305 // CHECK6-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5306 // CHECK6-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
5307 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5308 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5309 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5310 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
5311 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5312 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5313 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
5314 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5315 // CHECK6:       cond.true:
5316 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5317 // CHECK6:       cond.false:
5318 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5319 // CHECK6-NEXT:    br label [[COND_END]]
5320 // CHECK6:       cond.end:
5321 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
5322 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5323 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5324 // CHECK6-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
5325 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5326 // CHECK6:       omp.inner.for.cond:
5327 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
5328 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
5329 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
5330 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5331 // CHECK6:       omp.inner.for.body:
5332 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
5333 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
5334 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5335 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
5336 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
5337 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
5338 // CHECK6-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
5339 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
5340 // CHECK6-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12
5341 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5342 // CHECK6:       omp.body.continue:
5343 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5344 // CHECK6:       omp.inner.for.inc:
5345 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
5346 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
5347 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
5348 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
5349 // CHECK6:       omp.inner.for.end:
5350 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5351 // CHECK6:       omp.loop.exit:
5352 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
5353 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5354 // CHECK6-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5355 // CHECK6-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5356 // CHECK6:       .omp.final.then:
5357 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5358 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5359 // CHECK6:       .omp.final.done:
5360 // CHECK6-NEXT:    ret void
5361 //
5362 //
5363 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
5364 // CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5365 // CHECK6-NEXT:  entry:
5366 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5367 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5368 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5369 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
5370 // CHECK6-NEXT:    ret void
5371 //
5372 //
5373 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
5374 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5375 // CHECK6-NEXT:  entry:
5376 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5377 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5378 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5379 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5380 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5381 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5382 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5383 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5384 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5385 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5386 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5387 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5388 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5389 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5390 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5391 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
5392 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5393 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5394 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5395 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5396 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5397 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5398 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
5399 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5400 // CHECK6:       cond.true:
5401 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5402 // CHECK6:       cond.false:
5403 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5404 // CHECK6-NEXT:    br label [[COND_END]]
5405 // CHECK6:       cond.end:
5406 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5407 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5408 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5409 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5410 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5411 // CHECK6:       omp.inner.for.cond:
5412 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5413 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
5414 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5415 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5416 // CHECK6:       omp.inner.for.body:
5417 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
5418 // CHECK6-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
5419 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
5420 // CHECK6-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
5421 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !17
5422 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5423 // CHECK6:       omp.inner.for.inc:
5424 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5425 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
5426 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5427 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5428 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
5429 // CHECK6:       omp.inner.for.end:
5430 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5431 // CHECK6:       omp.loop.exit:
5432 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5433 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5434 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
5435 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5436 // CHECK6:       .omp.final.then:
5437 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5438 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5439 // CHECK6:       .omp.final.done:
5440 // CHECK6-NEXT:    ret void
5441 //
5442 //
5443 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
5444 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5445 // CHECK6-NEXT:  entry:
5446 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5447 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5448 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5449 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5450 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5451 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5452 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5453 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5454 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5455 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5456 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5457 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5458 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5459 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5460 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5461 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5462 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5463 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5464 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5465 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
5466 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5467 // CHECK6-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
5468 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5469 // CHECK6-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
5470 // CHECK6-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5471 // CHECK6-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
5472 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5473 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5474 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5475 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
5476 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5477 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5478 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
5479 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5480 // CHECK6:       cond.true:
5481 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5482 // CHECK6:       cond.false:
5483 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5484 // CHECK6-NEXT:    br label [[COND_END]]
5485 // CHECK6:       cond.end:
5486 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
5487 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5488 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5489 // CHECK6-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
5490 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5491 // CHECK6:       omp.inner.for.cond:
5492 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5493 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
5494 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
5495 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5496 // CHECK6:       omp.inner.for.body:
5497 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5498 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
5499 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5500 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !20
5501 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
5502 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !20
5503 // CHECK6-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
5504 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
5505 // CHECK6-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !20
5506 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5507 // CHECK6:       omp.body.continue:
5508 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5509 // CHECK6:       omp.inner.for.inc:
5510 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5511 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
5512 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5513 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
5514 // CHECK6:       omp.inner.for.end:
5515 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5516 // CHECK6:       omp.loop.exit:
5517 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
5518 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5519 // CHECK6-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5520 // CHECK6-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5521 // CHECK6:       .omp.final.then:
5522 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5523 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5524 // CHECK6:       .omp.final.done:
5525 // CHECK6-NEXT:    ret void
5526 //
5527 //
5528 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
5529 // CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5530 // CHECK6-NEXT:  entry:
5531 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5532 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5533 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5534 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
5535 // CHECK6-NEXT:    ret void
5536 //
5537 //
5538 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..6
5539 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5540 // CHECK6-NEXT:  entry:
5541 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5542 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5543 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5544 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5545 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5546 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5547 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5548 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5549 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5550 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5551 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5552 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5553 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5554 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5555 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5556 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
5557 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5558 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5559 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5560 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5561 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5562 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5563 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
5564 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5565 // CHECK6:       cond.true:
5566 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5567 // CHECK6:       cond.false:
5568 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5569 // CHECK6-NEXT:    br label [[COND_END]]
5570 // CHECK6:       cond.end:
5571 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5572 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5573 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5574 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5575 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5576 // CHECK6:       omp.inner.for.cond:
5577 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5578 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
5579 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5580 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5581 // CHECK6:       omp.inner.for.body:
5582 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
5583 // CHECK6-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
5584 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
5585 // CHECK6-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
5586 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !23
5587 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5588 // CHECK6:       omp.inner.for.inc:
5589 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5590 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
5591 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5592 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5593 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
5594 // CHECK6:       omp.inner.for.end:
5595 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5596 // CHECK6:       omp.loop.exit:
5597 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5598 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5599 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
5600 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5601 // CHECK6:       .omp.final.then:
5602 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5603 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5604 // CHECK6:       .omp.final.done:
5605 // CHECK6-NEXT:    ret void
5606 //
5607 //
5608 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..7
5609 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5610 // CHECK6-NEXT:  entry:
5611 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5612 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5613 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5614 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5615 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5616 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5617 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5618 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5619 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5620 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5621 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5622 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5623 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5624 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5625 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5626 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5627 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5628 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5629 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5630 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
5631 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5632 // CHECK6-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
5633 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5634 // CHECK6-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
5635 // CHECK6-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5636 // CHECK6-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
5637 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5638 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5639 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5640 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
5641 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
5642 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5643 // CHECK6:       omp.dispatch.cond:
5644 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5645 // CHECK6-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5646 // CHECK6-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
5647 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
5648 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5649 // CHECK6:       cond.true:
5650 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5651 // CHECK6-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
5652 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5653 // CHECK6:       cond.false:
5654 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5655 // CHECK6-NEXT:    br label [[COND_END]]
5656 // CHECK6:       cond.end:
5657 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
5658 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5659 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5660 // CHECK6-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
5661 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5662 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5663 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
5664 // CHECK6-NEXT:    br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5665 // CHECK6:       omp.dispatch.body:
5666 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5667 // CHECK6:       omp.inner.for.cond:
5668 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5669 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
5670 // CHECK6-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
5671 // CHECK6-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5672 // CHECK6:       omp.inner.for.body:
5673 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5674 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
5675 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5676 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !26
5677 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
5678 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !26
5679 // CHECK6-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
5680 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
5681 // CHECK6-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
5682 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5683 // CHECK6:       omp.body.continue:
5684 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5685 // CHECK6:       omp.inner.for.inc:
5686 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5687 // CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
5688 // CHECK6-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5689 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
5690 // CHECK6:       omp.inner.for.end:
5691 // CHECK6-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
5692 // CHECK6:       omp.dispatch.inc:
5693 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5694 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5695 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
5696 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
5697 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5698 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5699 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
5700 // CHECK6-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
5701 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND]]
5702 // CHECK6:       omp.dispatch.end:
5703 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
5704 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5705 // CHECK6-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
5706 // CHECK6-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5707 // CHECK6:       .omp.final.then:
5708 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5709 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5710 // CHECK6:       .omp.final.done:
5711 // CHECK6-NEXT:    ret void
5712 //
5713 //
5714 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
5715 // CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5716 // CHECK6-NEXT:  entry:
5717 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5718 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5719 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5720 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
5721 // CHECK6-NEXT:    ret void
5722 //
5723 //
5724 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..10
5725 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5726 // CHECK6-NEXT:  entry:
5727 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5728 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5729 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5730 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5731 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5732 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5733 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5734 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5735 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5736 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5737 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5738 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5739 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5740 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5741 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5742 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
5743 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5744 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5745 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5746 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5747 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5748 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5749 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
5750 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5751 // CHECK6:       cond.true:
5752 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5753 // CHECK6:       cond.false:
5754 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5755 // CHECK6-NEXT:    br label [[COND_END]]
5756 // CHECK6:       cond.end:
5757 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5758 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5759 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5760 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5761 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5762 // CHECK6:       omp.inner.for.cond:
5763 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5764 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
5765 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5766 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5767 // CHECK6:       omp.inner.for.body:
5768 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
5769 // CHECK6-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
5770 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
5771 // CHECK6-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
5772 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !29
5773 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5774 // CHECK6:       omp.inner.for.inc:
5775 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5776 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
5777 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5778 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5779 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
5780 // CHECK6:       omp.inner.for.end:
5781 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5782 // CHECK6:       omp.loop.exit:
5783 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5784 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5785 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
5786 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5787 // CHECK6:       .omp.final.then:
5788 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5789 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5790 // CHECK6:       .omp.final.done:
5791 // CHECK6-NEXT:    ret void
5792 //
5793 //
5794 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..11
5795 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5796 // CHECK6-NEXT:  entry:
5797 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5798 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5799 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5800 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5801 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5802 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5803 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5804 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5805 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5806 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5807 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5808 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5809 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5810 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5811 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5812 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5813 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5814 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5815 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5816 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
5817 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5818 // CHECK6-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
5819 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5820 // CHECK6-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
5821 // CHECK6-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5822 // CHECK6-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
5823 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5824 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5825 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5826 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5827 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5828 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
5829 // CHECK6-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
5830 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5831 // CHECK6:       omp.dispatch.cond:
5832 // CHECK6-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
5833 // CHECK6-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
5834 // CHECK6-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5835 // CHECK6:       omp.dispatch.body:
5836 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5837 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
5838 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5839 // CHECK6:       omp.inner.for.cond:
5840 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5841 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
5842 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
5843 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5844 // CHECK6:       omp.inner.for.body:
5845 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5846 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
5847 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5848 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !32
5849 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
5850 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !32
5851 // CHECK6-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
5852 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
5853 // CHECK6-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
5854 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5855 // CHECK6:       omp.body.continue:
5856 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5857 // CHECK6:       omp.inner.for.inc:
5858 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5859 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
5860 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5861 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
5862 // CHECK6:       omp.inner.for.end:
5863 // CHECK6-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
5864 // CHECK6:       omp.dispatch.inc:
5865 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND]]
5866 // CHECK6:       omp.dispatch.end:
5867 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5868 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
5869 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5870 // CHECK6:       .omp.final.then:
5871 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5872 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5873 // CHECK6:       .omp.final.done:
5874 // CHECK6-NEXT:    ret void
5875 //
5876 //
5877 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
5878 // CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5879 // CHECK6-NEXT:  entry:
5880 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5881 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5882 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5883 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
5884 // CHECK6-NEXT:    ret void
5885 //
5886 //
5887 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..14
5888 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5889 // CHECK6-NEXT:  entry:
5890 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5891 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5892 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5893 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5894 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5895 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5896 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5897 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5898 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5899 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5900 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5901 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5902 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5903 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5904 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5905 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
5906 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5907 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5908 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5909 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5910 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5911 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5912 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
5913 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5914 // CHECK6:       cond.true:
5915 // CHECK6-NEXT:    br label [[COND_END:%.*]]
5916 // CHECK6:       cond.false:
5917 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5918 // CHECK6-NEXT:    br label [[COND_END]]
5919 // CHECK6:       cond.end:
5920 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5921 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5922 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5923 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5924 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5925 // CHECK6:       omp.inner.for.cond:
5926 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5927 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
5928 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5929 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5930 // CHECK6:       omp.inner.for.body:
5931 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
5932 // CHECK6-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
5933 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
5934 // CHECK6-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
5935 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]]), !llvm.access.group !35
5936 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5937 // CHECK6:       omp.inner.for.inc:
5938 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5939 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
5940 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5941 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5942 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
5943 // CHECK6:       omp.inner.for.end:
5944 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5945 // CHECK6:       omp.loop.exit:
5946 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5947 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5948 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
5949 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5950 // CHECK6:       .omp.final.then:
5951 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
5952 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5953 // CHECK6:       .omp.final.done:
5954 // CHECK6-NEXT:    ret void
5955 //
5956 //
5957 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..15
5958 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
5959 // CHECK6-NEXT:  entry:
5960 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5961 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5962 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5963 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5964 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
5965 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5966 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5967 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5968 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5969 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5970 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5971 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
5972 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5973 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5974 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5975 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5976 // CHECK6-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
5977 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
5978 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5979 // CHECK6-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
5980 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5981 // CHECK6-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
5982 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5983 // CHECK6-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
5984 // CHECK6-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5985 // CHECK6-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
5986 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5987 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5988 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5989 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5990 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5991 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
5992 // CHECK6-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
5993 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5994 // CHECK6:       omp.dispatch.cond:
5995 // CHECK6-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
5996 // CHECK6-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
5997 // CHECK6-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5998 // CHECK6:       omp.dispatch.body:
5999 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6000 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
6001 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6002 // CHECK6:       omp.inner.for.cond:
6003 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6004 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
6005 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
6006 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6007 // CHECK6:       omp.inner.for.body:
6008 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6009 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
6010 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6011 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !38
6012 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
6013 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !38
6014 // CHECK6-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
6015 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
6016 // CHECK6-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
6017 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6018 // CHECK6:       omp.body.continue:
6019 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6020 // CHECK6:       omp.inner.for.inc:
6021 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6022 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
6023 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6024 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
6025 // CHECK6:       omp.inner.for.end:
6026 // CHECK6-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6027 // CHECK6:       omp.dispatch.inc:
6028 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND]]
6029 // CHECK6:       omp.dispatch.end:
6030 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6031 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
6032 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6033 // CHECK6:       .omp.final.then:
6034 // CHECK6-NEXT:    store i32 123, i32* [[I]], align 4
6035 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6036 // CHECK6:       .omp.final.done:
6037 // CHECK6-NEXT:    ret void
6038 //
6039 //
6040 // CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
6041 // CHECK6-SAME: () #[[ATTR3:[0-9]+]] {
6042 // CHECK6-NEXT:  entry:
6043 // CHECK6-NEXT:    call void @__tgt_register_requires(i64 1)
6044 // CHECK6-NEXT:    ret void
6045 //
6046 //
6047 // CHECK7-LABEL: define {{[^@]+}}@_Z21teams_template_structv
6048 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
6049 // CHECK7-NEXT:  entry:
6050 // CHECK7-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
6051 // CHECK7-NEXT:    [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
6052 // CHECK7-NEXT:    ret i32 [[CALL]]
6053 //
6054 //
6055 // CHECK7-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
6056 // CHECK7-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
6057 // CHECK7-NEXT:  entry:
6058 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6059 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
6060 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
6061 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
6062 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6063 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
6064 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
6065 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
6066 // CHECK7-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
6067 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
6068 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
6069 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
6070 // CHECK7-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
6071 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
6072 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
6073 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
6074 // CHECK7-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
6075 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
6076 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
6077 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
6078 // CHECK7-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
6079 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6080 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6081 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
6082 // CHECK7-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6083 // CHECK7-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
6084 // CHECK7-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
6085 // CHECK7-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6086 // CHECK7-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
6087 // CHECK7-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
6088 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
6089 // CHECK7-NEXT:    store i8* null, i8** [[TMP4]], align 4
6090 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6091 // CHECK7-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6092 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
6093 // CHECK7-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
6094 // CHECK7-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
6095 // CHECK7-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
6096 // CHECK7:       omp_offload.failed:
6097 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
6098 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
6099 // CHECK7:       omp_offload.cont:
6100 // CHECK7-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
6101 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
6102 // CHECK7-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
6103 // CHECK7-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
6104 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
6105 // CHECK7-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
6106 // CHECK7-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
6107 // CHECK7-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
6108 // CHECK7-NEXT:    store i8* null, i8** [[TMP13]], align 4
6109 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
6110 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
6111 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
6112 // CHECK7-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
6113 // CHECK7-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
6114 // CHECK7-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
6115 // CHECK7:       omp_offload.failed7:
6116 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
6117 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
6118 // CHECK7:       omp_offload.cont8:
6119 // CHECK7-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
6120 // CHECK7-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
6121 // CHECK7-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
6122 // CHECK7-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
6123 // CHECK7-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
6124 // CHECK7-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
6125 // CHECK7-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
6126 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
6127 // CHECK7-NEXT:    store i8* null, i8** [[TMP22]], align 4
6128 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
6129 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
6130 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
6131 // CHECK7-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
6132 // CHECK7-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
6133 // CHECK7-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
6134 // CHECK7:       omp_offload.failed14:
6135 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
6136 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
6137 // CHECK7:       omp_offload.cont15:
6138 // CHECK7-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
6139 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
6140 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
6141 // CHECK7-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
6142 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
6143 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
6144 // CHECK7-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
6145 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
6146 // CHECK7-NEXT:    store i8* null, i8** [[TMP31]], align 4
6147 // CHECK7-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
6148 // CHECK7-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
6149 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
6150 // CHECK7-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
6151 // CHECK7-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
6152 // CHECK7-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
6153 // CHECK7:       omp_offload.failed21:
6154 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
6155 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
6156 // CHECK7:       omp_offload.cont22:
6157 // CHECK7-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
6158 // CHECK7-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
6159 // CHECK7-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
6160 // CHECK7-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
6161 // CHECK7-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
6162 // CHECK7-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
6163 // CHECK7-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
6164 // CHECK7-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
6165 // CHECK7-NEXT:    store i8* null, i8** [[TMP40]], align 4
6166 // CHECK7-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
6167 // CHECK7-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
6168 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
6169 // CHECK7-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
6170 // CHECK7-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
6171 // CHECK7-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
6172 // CHECK7:       omp_offload.failed28:
6173 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
6174 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
6175 // CHECK7:       omp_offload.cont29:
6176 // CHECK7-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
6177 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
6178 // CHECK7-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
6179 // CHECK7-NEXT:    ret i32 [[TMP45]]
6180 //
6181 //
6182 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
6183 // CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
6184 // CHECK7-NEXT:  entry:
6185 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6186 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6187 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6188 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
6189 // CHECK7-NEXT:    ret void
6190 //
6191 //
6192 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
6193 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6194 // CHECK7-NEXT:  entry:
6195 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6196 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6197 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6198 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6199 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6200 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6201 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6202 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6203 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6204 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6205 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6206 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6207 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6208 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6209 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6210 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
6211 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6212 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6213 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6214 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
6215 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6216 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6217 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
6218 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6219 // CHECK7:       cond.true:
6220 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6221 // CHECK7:       cond.false:
6222 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6223 // CHECK7-NEXT:    br label [[COND_END]]
6224 // CHECK7:       cond.end:
6225 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6226 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6227 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6228 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6229 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6230 // CHECK7:       omp.inner.for.cond:
6231 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
6232 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
6233 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
6234 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6235 // CHECK7:       omp.inner.for.body:
6236 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !9
6237 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
6238 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !9
6239 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6240 // CHECK7:       omp.inner.for.inc:
6241 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
6242 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !9
6243 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
6244 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
6245 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
6246 // CHECK7:       omp.inner.for.end:
6247 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6248 // CHECK7:       omp.loop.exit:
6249 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
6250 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6251 // CHECK7-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
6252 // CHECK7-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6253 // CHECK7:       .omp.final.then:
6254 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6255 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6256 // CHECK7:       .omp.final.done:
6257 // CHECK7-NEXT:    ret void
6258 //
6259 //
6260 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..1
6261 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6262 // CHECK7-NEXT:  entry:
6263 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6264 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6265 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6266 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6267 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6268 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6269 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6270 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6271 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6272 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6273 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6274 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6275 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6276 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6277 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6278 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6279 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6280 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6281 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6282 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
6283 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6284 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6285 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
6286 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
6287 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6288 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6289 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6290 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
6291 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6292 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6293 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
6294 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6295 // CHECK7:       cond.true:
6296 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6297 // CHECK7:       cond.false:
6298 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6299 // CHECK7-NEXT:    br label [[COND_END]]
6300 // CHECK7:       cond.end:
6301 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
6302 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6303 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6304 // CHECK7-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
6305 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6306 // CHECK7:       omp.inner.for.cond:
6307 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
6308 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
6309 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
6310 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6311 // CHECK7:       omp.inner.for.body:
6312 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
6313 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
6314 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6315 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
6316 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
6317 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
6318 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
6319 // CHECK7-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
6320 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6321 // CHECK7:       omp.body.continue:
6322 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6323 // CHECK7:       omp.inner.for.inc:
6324 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
6325 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
6326 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
6327 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
6328 // CHECK7:       omp.inner.for.end:
6329 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6330 // CHECK7:       omp.loop.exit:
6331 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
6332 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6333 // CHECK7-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
6334 // CHECK7-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6335 // CHECK7:       .omp.final.then:
6336 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6337 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6338 // CHECK7:       .omp.final.done:
6339 // CHECK7-NEXT:    ret void
6340 //
6341 //
6342 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
6343 // CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6344 // CHECK7-NEXT:  entry:
6345 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6346 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6347 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6348 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
6349 // CHECK7-NEXT:    ret void
6350 //
6351 //
6352 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
6353 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6354 // CHECK7-NEXT:  entry:
6355 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6356 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6357 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6358 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6359 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6360 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6361 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6362 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6363 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6364 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6365 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6366 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6367 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6368 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6369 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6370 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
6371 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6372 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6373 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6374 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
6375 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6376 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6377 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
6378 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6379 // CHECK7:       cond.true:
6380 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6381 // CHECK7:       cond.false:
6382 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6383 // CHECK7-NEXT:    br label [[COND_END]]
6384 // CHECK7:       cond.end:
6385 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6386 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6387 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6388 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6389 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6390 // CHECK7:       omp.inner.for.cond:
6391 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
6392 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
6393 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
6394 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6395 // CHECK7:       omp.inner.for.body:
6396 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
6397 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
6398 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !18
6399 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6400 // CHECK7:       omp.inner.for.inc:
6401 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
6402 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
6403 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
6404 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
6405 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
6406 // CHECK7:       omp.inner.for.end:
6407 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6408 // CHECK7:       omp.loop.exit:
6409 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
6410 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6411 // CHECK7-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
6412 // CHECK7-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6413 // CHECK7:       .omp.final.then:
6414 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6415 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6416 // CHECK7:       .omp.final.done:
6417 // CHECK7-NEXT:    ret void
6418 //
6419 //
6420 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3
6421 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6422 // CHECK7-NEXT:  entry:
6423 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6424 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6425 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6426 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6427 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6428 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6429 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6430 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6431 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6432 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6433 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6434 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6435 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6436 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6437 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6438 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6439 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6440 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6441 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6442 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
6443 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6444 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6445 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
6446 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
6447 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6448 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6449 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6450 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
6451 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6452 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6453 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
6454 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6455 // CHECK7:       cond.true:
6456 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6457 // CHECK7:       cond.false:
6458 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6459 // CHECK7-NEXT:    br label [[COND_END]]
6460 // CHECK7:       cond.end:
6461 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
6462 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6463 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6464 // CHECK7-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
6465 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6466 // CHECK7:       omp.inner.for.cond:
6467 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
6468 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
6469 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
6470 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6471 // CHECK7:       omp.inner.for.body:
6472 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
6473 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
6474 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6475 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
6476 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
6477 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
6478 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
6479 // CHECK7-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
6480 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6481 // CHECK7:       omp.body.continue:
6482 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6483 // CHECK7:       omp.inner.for.inc:
6484 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
6485 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
6486 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
6487 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
6488 // CHECK7:       omp.inner.for.end:
6489 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6490 // CHECK7:       omp.loop.exit:
6491 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
6492 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6493 // CHECK7-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
6494 // CHECK7-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6495 // CHECK7:       .omp.final.then:
6496 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6497 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6498 // CHECK7:       .omp.final.done:
6499 // CHECK7-NEXT:    ret void
6500 //
6501 //
6502 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
6503 // CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6504 // CHECK7-NEXT:  entry:
6505 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6506 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6507 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6508 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
6509 // CHECK7-NEXT:    ret void
6510 //
6511 //
6512 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..6
6513 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6514 // CHECK7-NEXT:  entry:
6515 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6516 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6517 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6518 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6519 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6520 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6521 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6522 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6523 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6524 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6525 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6526 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6527 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6528 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6529 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6530 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
6531 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6532 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6533 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6534 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
6535 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6536 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6537 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
6538 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6539 // CHECK7:       cond.true:
6540 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6541 // CHECK7:       cond.false:
6542 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6543 // CHECK7-NEXT:    br label [[COND_END]]
6544 // CHECK7:       cond.end:
6545 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6546 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6547 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6548 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6549 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6550 // CHECK7:       omp.inner.for.cond:
6551 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
6552 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
6553 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
6554 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6555 // CHECK7:       omp.inner.for.body:
6556 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24
6557 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
6558 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !24
6559 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6560 // CHECK7:       omp.inner.for.inc:
6561 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
6562 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24
6563 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
6564 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
6565 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
6566 // CHECK7:       omp.inner.for.end:
6567 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6568 // CHECK7:       omp.loop.exit:
6569 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
6570 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6571 // CHECK7-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
6572 // CHECK7-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6573 // CHECK7:       .omp.final.then:
6574 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6575 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6576 // CHECK7:       .omp.final.done:
6577 // CHECK7-NEXT:    ret void
6578 //
6579 //
6580 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..7
6581 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6582 // CHECK7-NEXT:  entry:
6583 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6584 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6585 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6586 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6587 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6588 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6589 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6590 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6591 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6592 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6593 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6594 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6595 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6596 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6597 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6598 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6599 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6600 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6601 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6602 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
6603 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6604 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6605 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
6606 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
6607 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6608 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6609 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6610 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
6611 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
6612 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6613 // CHECK7:       omp.dispatch.cond:
6614 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6615 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6616 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
6617 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6618 // CHECK7:       cond.true:
6619 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6620 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6621 // CHECK7:       cond.false:
6622 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6623 // CHECK7-NEXT:    br label [[COND_END]]
6624 // CHECK7:       cond.end:
6625 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
6626 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6627 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6628 // CHECK7-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
6629 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6630 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6631 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
6632 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6633 // CHECK7:       omp.dispatch.body:
6634 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6635 // CHECK7:       omp.inner.for.cond:
6636 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
6637 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
6638 // CHECK7-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
6639 // CHECK7-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6640 // CHECK7:       omp.inner.for.body:
6641 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
6642 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
6643 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6644 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
6645 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
6646 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !27
6647 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
6648 // CHECK7-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
6649 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6650 // CHECK7:       omp.body.continue:
6651 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6652 // CHECK7:       omp.inner.for.inc:
6653 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
6654 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
6655 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
6656 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
6657 // CHECK7:       omp.inner.for.end:
6658 // CHECK7-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6659 // CHECK7:       omp.dispatch.inc:
6660 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6661 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6662 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
6663 // CHECK7-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
6664 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6665 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6666 // CHECK7-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
6667 // CHECK7-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
6668 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND]]
6669 // CHECK7:       omp.dispatch.end:
6670 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
6671 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6672 // CHECK7-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
6673 // CHECK7-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6674 // CHECK7:       .omp.final.then:
6675 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6676 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6677 // CHECK7:       .omp.final.done:
6678 // CHECK7-NEXT:    ret void
6679 //
6680 //
6681 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
6682 // CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6683 // CHECK7-NEXT:  entry:
6684 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6685 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6686 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6687 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
6688 // CHECK7-NEXT:    ret void
6689 //
6690 //
6691 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..10
6692 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6693 // CHECK7-NEXT:  entry:
6694 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6695 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6696 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6697 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6698 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6699 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6700 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6701 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6702 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6703 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6704 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6705 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6706 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6707 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6708 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6709 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
6710 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6711 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6712 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6713 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
6714 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6715 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6716 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
6717 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6718 // CHECK7:       cond.true:
6719 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6720 // CHECK7:       cond.false:
6721 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6722 // CHECK7-NEXT:    br label [[COND_END]]
6723 // CHECK7:       cond.end:
6724 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6725 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6726 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6727 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6728 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6729 // CHECK7:       omp.inner.for.cond:
6730 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
6731 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
6732 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
6733 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6734 // CHECK7:       omp.inner.for.body:
6735 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30
6736 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
6737 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !30
6738 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6739 // CHECK7:       omp.inner.for.inc:
6740 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
6741 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30
6742 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
6743 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
6744 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
6745 // CHECK7:       omp.inner.for.end:
6746 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6747 // CHECK7:       omp.loop.exit:
6748 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
6749 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6750 // CHECK7-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
6751 // CHECK7-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6752 // CHECK7:       .omp.final.then:
6753 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6754 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6755 // CHECK7:       .omp.final.done:
6756 // CHECK7-NEXT:    ret void
6757 //
6758 //
6759 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..11
6760 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6761 // CHECK7-NEXT:  entry:
6762 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6763 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6764 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6765 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6766 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6767 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6768 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6769 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6770 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6771 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6772 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6773 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6774 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6775 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6776 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6777 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6778 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6779 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6780 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6781 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
6782 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6783 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6784 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
6785 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
6786 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6787 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6788 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6789 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6790 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6791 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
6792 // CHECK7-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
6793 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6794 // CHECK7:       omp.dispatch.cond:
6795 // CHECK7-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6796 // CHECK7-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
6797 // CHECK7-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6798 // CHECK7:       omp.dispatch.body:
6799 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6800 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
6801 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6802 // CHECK7:       omp.inner.for.cond:
6803 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6804 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
6805 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
6806 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6807 // CHECK7:       omp.inner.for.body:
6808 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6809 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
6810 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6811 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
6812 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
6813 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !33
6814 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
6815 // CHECK7-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
6816 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6817 // CHECK7:       omp.body.continue:
6818 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6819 // CHECK7:       omp.inner.for.inc:
6820 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6821 // CHECK7-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
6822 // CHECK7-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6823 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
6824 // CHECK7:       omp.inner.for.end:
6825 // CHECK7-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6826 // CHECK7:       omp.dispatch.inc:
6827 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND]]
6828 // CHECK7:       omp.dispatch.end:
6829 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6830 // CHECK7-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
6831 // CHECK7-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6832 // CHECK7:       .omp.final.then:
6833 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6834 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6835 // CHECK7:       .omp.final.done:
6836 // CHECK7-NEXT:    ret void
6837 //
6838 //
6839 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
6840 // CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6841 // CHECK7-NEXT:  entry:
6842 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6843 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6844 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6845 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
6846 // CHECK7-NEXT:    ret void
6847 //
6848 //
6849 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..14
6850 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6851 // CHECK7-NEXT:  entry:
6852 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6853 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6854 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6855 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6856 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6857 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6858 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6859 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6860 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6861 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6862 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6863 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6864 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6865 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6866 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6867 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
6868 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6869 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6870 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6871 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
6872 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6873 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6874 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
6875 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6876 // CHECK7:       cond.true:
6877 // CHECK7-NEXT:    br label [[COND_END:%.*]]
6878 // CHECK7:       cond.false:
6879 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6880 // CHECK7-NEXT:    br label [[COND_END]]
6881 // CHECK7:       cond.end:
6882 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6883 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6884 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6885 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6886 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6887 // CHECK7:       omp.inner.for.cond:
6888 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
6889 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
6890 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
6891 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6892 // CHECK7:       omp.inner.for.body:
6893 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !36
6894 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
6895 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !36
6896 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6897 // CHECK7:       omp.inner.for.inc:
6898 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
6899 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !36
6900 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
6901 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
6902 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
6903 // CHECK7:       omp.inner.for.end:
6904 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6905 // CHECK7:       omp.loop.exit:
6906 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
6907 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6908 // CHECK7-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
6909 // CHECK7-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6910 // CHECK7:       .omp.final.then:
6911 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6912 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6913 // CHECK7:       .omp.final.done:
6914 // CHECK7-NEXT:    ret void
6915 //
6916 //
6917 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..15
6918 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
6919 // CHECK7-NEXT:  entry:
6920 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6921 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6922 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6923 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6924 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
6925 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6926 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6927 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6928 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6929 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6930 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6931 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
6932 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6933 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6934 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6935 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6936 // CHECK7-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
6937 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
6938 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6939 // CHECK7-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
6940 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6941 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6942 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
6943 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
6944 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6945 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6946 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6947 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6948 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6949 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
6950 // CHECK7-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
6951 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6952 // CHECK7:       omp.dispatch.cond:
6953 // CHECK7-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6954 // CHECK7-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
6955 // CHECK7-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6956 // CHECK7:       omp.dispatch.body:
6957 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6958 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
6959 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6960 // CHECK7:       omp.inner.for.cond:
6961 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6962 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
6963 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
6964 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6965 // CHECK7:       omp.inner.for.body:
6966 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6967 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
6968 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6969 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
6970 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
6971 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !39
6972 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
6973 // CHECK7-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !39
6974 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6975 // CHECK7:       omp.body.continue:
6976 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6977 // CHECK7:       omp.inner.for.inc:
6978 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6979 // CHECK7-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
6980 // CHECK7-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6981 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
6982 // CHECK7:       omp.inner.for.end:
6983 // CHECK7-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6984 // CHECK7:       omp.dispatch.inc:
6985 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND]]
6986 // CHECK7:       omp.dispatch.end:
6987 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6988 // CHECK7-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
6989 // CHECK7-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6990 // CHECK7:       .omp.final.then:
6991 // CHECK7-NEXT:    store i32 123, i32* [[I]], align 4
6992 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6993 // CHECK7:       .omp.final.done:
6994 // CHECK7-NEXT:    ret void
6995 //
6996 //
6997 // CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
6998 // CHECK7-SAME: () #[[ATTR3:[0-9]+]] {
6999 // CHECK7-NEXT:  entry:
7000 // CHECK7-NEXT:    call void @__tgt_register_requires(i64 1)
7001 // CHECK7-NEXT:    ret void
7002 //
7003 //
7004 // CHECK8-LABEL: define {{[^@]+}}@_Z21teams_template_structv
7005 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
7006 // CHECK8-NEXT:  entry:
7007 // CHECK8-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
7008 // CHECK8-NEXT:    [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
7009 // CHECK8-NEXT:    ret i32 [[CALL]]
7010 //
7011 //
7012 // CHECK8-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
7013 // CHECK8-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
7014 // CHECK8-NEXT:  entry:
7015 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7016 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
7017 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
7018 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
7019 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7020 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
7021 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
7022 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
7023 // CHECK8-NEXT:    [[_TMP6:%.*]] = alloca i32, align 4
7024 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
7025 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
7026 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
7027 // CHECK8-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
7028 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
7029 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
7030 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
7031 // CHECK8-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
7032 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
7033 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
7034 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
7035 // CHECK8-NEXT:    [[_TMP27:%.*]] = alloca i32, align 4
7036 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7037 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7038 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
7039 // CHECK8-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7040 // CHECK8-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
7041 // CHECK8-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
7042 // CHECK8-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7043 // CHECK8-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
7044 // CHECK8-NEXT:    store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
7045 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
7046 // CHECK8-NEXT:    store i8* null, i8** [[TMP4]], align 4
7047 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7048 // CHECK8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7049 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
7050 // CHECK8-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
7051 // CHECK8-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
7052 // CHECK8-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7053 // CHECK8:       omp_offload.failed:
7054 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
7055 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
7056 // CHECK8:       omp_offload.cont:
7057 // CHECK8-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
7058 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
7059 // CHECK8-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
7060 // CHECK8-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
7061 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
7062 // CHECK8-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
7063 // CHECK8-NEXT:    store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
7064 // CHECK8-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
7065 // CHECK8-NEXT:    store i8* null, i8** [[TMP13]], align 4
7066 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
7067 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
7068 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
7069 // CHECK8-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
7070 // CHECK8-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
7071 // CHECK8-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
7072 // CHECK8:       omp_offload.failed7:
7073 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
7074 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
7075 // CHECK8:       omp_offload.cont8:
7076 // CHECK8-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
7077 // CHECK8-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
7078 // CHECK8-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
7079 // CHECK8-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
7080 // CHECK8-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
7081 // CHECK8-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
7082 // CHECK8-NEXT:    store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
7083 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
7084 // CHECK8-NEXT:    store i8* null, i8** [[TMP22]], align 4
7085 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
7086 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
7087 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
7088 // CHECK8-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
7089 // CHECK8-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
7090 // CHECK8-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
7091 // CHECK8:       omp_offload.failed14:
7092 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
7093 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
7094 // CHECK8:       omp_offload.cont15:
7095 // CHECK8-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
7096 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
7097 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
7098 // CHECK8-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
7099 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
7100 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
7101 // CHECK8-NEXT:    store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
7102 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
7103 // CHECK8-NEXT:    store i8* null, i8** [[TMP31]], align 4
7104 // CHECK8-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
7105 // CHECK8-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
7106 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
7107 // CHECK8-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
7108 // CHECK8-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
7109 // CHECK8-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
7110 // CHECK8:       omp_offload.failed21:
7111 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
7112 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
7113 // CHECK8:       omp_offload.cont22:
7114 // CHECK8-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
7115 // CHECK8-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
7116 // CHECK8-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
7117 // CHECK8-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
7118 // CHECK8-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
7119 // CHECK8-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
7120 // CHECK8-NEXT:    store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
7121 // CHECK8-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
7122 // CHECK8-NEXT:    store i8* null, i8** [[TMP40]], align 4
7123 // CHECK8-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
7124 // CHECK8-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
7125 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
7126 // CHECK8-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
7127 // CHECK8-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
7128 // CHECK8-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
7129 // CHECK8:       omp_offload.failed28:
7130 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
7131 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT29]]
7132 // CHECK8:       omp_offload.cont29:
7133 // CHECK8-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
7134 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
7135 // CHECK8-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
7136 // CHECK8-NEXT:    ret i32 [[TMP45]]
7137 //
7138 //
7139 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
7140 // CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
7141 // CHECK8-NEXT:  entry:
7142 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7143 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7144 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7145 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
7146 // CHECK8-NEXT:    ret void
7147 //
7148 //
7149 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
7150 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7151 // CHECK8-NEXT:  entry:
7152 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7153 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7154 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7155 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7156 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7157 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7158 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7159 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7160 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7161 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7162 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7163 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7164 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7165 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7166 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7167 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
7168 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7169 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7170 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7171 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
7172 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7173 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7174 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
7175 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7176 // CHECK8:       cond.true:
7177 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7178 // CHECK8:       cond.false:
7179 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7180 // CHECK8-NEXT:    br label [[COND_END]]
7181 // CHECK8:       cond.end:
7182 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7183 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7184 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7185 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7186 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7187 // CHECK8:       omp.inner.for.cond:
7188 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
7189 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
7190 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7191 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7192 // CHECK8:       omp.inner.for.body:
7193 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !9
7194 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !9
7195 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !9
7196 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7197 // CHECK8:       omp.inner.for.inc:
7198 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
7199 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !9
7200 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
7201 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
7202 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
7203 // CHECK8:       omp.inner.for.end:
7204 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7205 // CHECK8:       omp.loop.exit:
7206 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
7207 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7208 // CHECK8-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
7209 // CHECK8-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7210 // CHECK8:       .omp.final.then:
7211 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7212 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7213 // CHECK8:       .omp.final.done:
7214 // CHECK8-NEXT:    ret void
7215 //
7216 //
7217 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..1
7218 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7219 // CHECK8-NEXT:  entry:
7220 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7221 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7222 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7223 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7224 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7225 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7226 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7227 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7228 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7229 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7230 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7231 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7232 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7233 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7234 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7235 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7236 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7237 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7238 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7239 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
7240 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7241 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7242 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
7243 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
7244 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7245 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7246 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7247 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
7248 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7249 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7250 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
7251 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7252 // CHECK8:       cond.true:
7253 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7254 // CHECK8:       cond.false:
7255 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7256 // CHECK8-NEXT:    br label [[COND_END]]
7257 // CHECK8:       cond.end:
7258 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
7259 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7260 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7261 // CHECK8-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
7262 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7263 // CHECK8:       omp.inner.for.cond:
7264 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
7265 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
7266 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
7267 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7268 // CHECK8:       omp.inner.for.body:
7269 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
7270 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
7271 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7272 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
7273 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
7274 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
7275 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
7276 // CHECK8-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
7277 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7278 // CHECK8:       omp.body.continue:
7279 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7280 // CHECK8:       omp.inner.for.inc:
7281 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
7282 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
7283 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
7284 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
7285 // CHECK8:       omp.inner.for.end:
7286 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7287 // CHECK8:       omp.loop.exit:
7288 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
7289 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7290 // CHECK8-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
7291 // CHECK8-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7292 // CHECK8:       .omp.final.then:
7293 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7294 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7295 // CHECK8:       .omp.final.done:
7296 // CHECK8-NEXT:    ret void
7297 //
7298 //
7299 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
7300 // CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7301 // CHECK8-NEXT:  entry:
7302 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7303 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7304 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7305 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
7306 // CHECK8-NEXT:    ret void
7307 //
7308 //
7309 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..2
7310 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7311 // CHECK8-NEXT:  entry:
7312 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7313 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7314 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7315 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7316 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7317 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7318 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7319 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7320 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7321 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7322 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7323 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7324 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7325 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7326 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7327 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
7328 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7329 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7330 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7331 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
7332 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7333 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7334 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
7335 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7336 // CHECK8:       cond.true:
7337 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7338 // CHECK8:       cond.false:
7339 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7340 // CHECK8-NEXT:    br label [[COND_END]]
7341 // CHECK8:       cond.end:
7342 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7343 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7344 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7345 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7346 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7347 // CHECK8:       omp.inner.for.cond:
7348 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
7349 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
7350 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7351 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7352 // CHECK8:       omp.inner.for.body:
7353 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
7354 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
7355 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !18
7356 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7357 // CHECK8:       omp.inner.for.inc:
7358 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
7359 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
7360 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
7361 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
7362 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
7363 // CHECK8:       omp.inner.for.end:
7364 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7365 // CHECK8:       omp.loop.exit:
7366 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
7367 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7368 // CHECK8-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
7369 // CHECK8-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7370 // CHECK8:       .omp.final.then:
7371 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7372 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7373 // CHECK8:       .omp.final.done:
7374 // CHECK8-NEXT:    ret void
7375 //
7376 //
7377 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..3
7378 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7379 // CHECK8-NEXT:  entry:
7380 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7381 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7382 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7383 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7384 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7385 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7386 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7387 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7388 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7389 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7390 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7391 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7392 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7393 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7394 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7395 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7396 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7397 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7398 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7399 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
7400 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7401 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7402 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
7403 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
7404 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7405 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7406 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7407 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
7408 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7409 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7410 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
7411 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7412 // CHECK8:       cond.true:
7413 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7414 // CHECK8:       cond.false:
7415 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7416 // CHECK8-NEXT:    br label [[COND_END]]
7417 // CHECK8:       cond.end:
7418 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
7419 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7420 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7421 // CHECK8-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
7422 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7423 // CHECK8:       omp.inner.for.cond:
7424 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
7425 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
7426 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
7427 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7428 // CHECK8:       omp.inner.for.body:
7429 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
7430 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
7431 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7432 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
7433 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
7434 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
7435 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
7436 // CHECK8-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
7437 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7438 // CHECK8:       omp.body.continue:
7439 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7440 // CHECK8:       omp.inner.for.inc:
7441 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
7442 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
7443 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
7444 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
7445 // CHECK8:       omp.inner.for.end:
7446 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7447 // CHECK8:       omp.loop.exit:
7448 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
7449 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7450 // CHECK8-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
7451 // CHECK8-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7452 // CHECK8:       .omp.final.then:
7453 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7454 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7455 // CHECK8:       .omp.final.done:
7456 // CHECK8-NEXT:    ret void
7457 //
7458 //
7459 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
7460 // CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7461 // CHECK8-NEXT:  entry:
7462 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7463 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7464 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7465 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
7466 // CHECK8-NEXT:    ret void
7467 //
7468 //
7469 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..6
7470 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7471 // CHECK8-NEXT:  entry:
7472 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7473 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7474 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7475 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7476 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7477 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7478 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7479 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7480 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7481 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7482 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7483 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7484 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7485 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7486 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7487 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
7488 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7489 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7490 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7491 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
7492 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7493 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7494 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
7495 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7496 // CHECK8:       cond.true:
7497 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7498 // CHECK8:       cond.false:
7499 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7500 // CHECK8-NEXT:    br label [[COND_END]]
7501 // CHECK8:       cond.end:
7502 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7503 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7504 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7505 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7506 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7507 // CHECK8:       omp.inner.for.cond:
7508 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
7509 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
7510 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7511 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7512 // CHECK8:       omp.inner.for.body:
7513 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24
7514 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24
7515 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !24
7516 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7517 // CHECK8:       omp.inner.for.inc:
7518 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
7519 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24
7520 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
7521 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
7522 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
7523 // CHECK8:       omp.inner.for.end:
7524 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7525 // CHECK8:       omp.loop.exit:
7526 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
7527 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7528 // CHECK8-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
7529 // CHECK8-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7530 // CHECK8:       .omp.final.then:
7531 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7532 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7533 // CHECK8:       .omp.final.done:
7534 // CHECK8-NEXT:    ret void
7535 //
7536 //
7537 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..7
7538 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7539 // CHECK8-NEXT:  entry:
7540 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7541 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7542 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7543 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7544 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7545 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7546 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7547 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7548 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7549 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7550 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7551 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7552 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7553 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7554 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7555 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7556 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7557 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7558 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7559 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
7560 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7561 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7562 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
7563 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
7564 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7565 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7566 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7567 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
7568 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
7569 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
7570 // CHECK8:       omp.dispatch.cond:
7571 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7572 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7573 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
7574 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7575 // CHECK8:       cond.true:
7576 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7577 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7578 // CHECK8:       cond.false:
7579 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7580 // CHECK8-NEXT:    br label [[COND_END]]
7581 // CHECK8:       cond.end:
7582 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
7583 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7584 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7585 // CHECK8-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
7586 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7587 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7588 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
7589 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7590 // CHECK8:       omp.dispatch.body:
7591 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7592 // CHECK8:       omp.inner.for.cond:
7593 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
7594 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
7595 // CHECK8-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
7596 // CHECK8-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7597 // CHECK8:       omp.inner.for.body:
7598 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
7599 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
7600 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7601 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
7602 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
7603 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !27
7604 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
7605 // CHECK8-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
7606 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7607 // CHECK8:       omp.body.continue:
7608 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7609 // CHECK8:       omp.inner.for.inc:
7610 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
7611 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
7612 // CHECK8-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
7613 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
7614 // CHECK8:       omp.inner.for.end:
7615 // CHECK8-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
7616 // CHECK8:       omp.dispatch.inc:
7617 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7618 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7619 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
7620 // CHECK8-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
7621 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7622 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7623 // CHECK8-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
7624 // CHECK8-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
7625 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND]]
7626 // CHECK8:       omp.dispatch.end:
7627 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
7628 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7629 // CHECK8-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
7630 // CHECK8-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7631 // CHECK8:       .omp.final.then:
7632 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7633 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7634 // CHECK8:       .omp.final.done:
7635 // CHECK8-NEXT:    ret void
7636 //
7637 //
7638 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
7639 // CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7640 // CHECK8-NEXT:  entry:
7641 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7642 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7643 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7644 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
7645 // CHECK8-NEXT:    ret void
7646 //
7647 //
7648 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..10
7649 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7650 // CHECK8-NEXT:  entry:
7651 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7652 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7653 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7654 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7655 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7656 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7657 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7658 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7659 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7660 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7661 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7662 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7663 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7664 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7665 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7666 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
7667 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7668 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7669 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7670 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
7671 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7672 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7673 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
7674 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7675 // CHECK8:       cond.true:
7676 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7677 // CHECK8:       cond.false:
7678 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7679 // CHECK8-NEXT:    br label [[COND_END]]
7680 // CHECK8:       cond.end:
7681 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7682 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7683 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7684 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7685 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7686 // CHECK8:       omp.inner.for.cond:
7687 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
7688 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
7689 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7690 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7691 // CHECK8:       omp.inner.for.body:
7692 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30
7693 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30
7694 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !30
7695 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7696 // CHECK8:       omp.inner.for.inc:
7697 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
7698 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30
7699 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
7700 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
7701 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
7702 // CHECK8:       omp.inner.for.end:
7703 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7704 // CHECK8:       omp.loop.exit:
7705 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
7706 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7707 // CHECK8-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
7708 // CHECK8-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7709 // CHECK8:       .omp.final.then:
7710 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7711 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7712 // CHECK8:       .omp.final.done:
7713 // CHECK8-NEXT:    ret void
7714 //
7715 //
7716 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..11
7717 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7718 // CHECK8-NEXT:  entry:
7719 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7720 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7721 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7722 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7723 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7724 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7725 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7726 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7727 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7728 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7729 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7730 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7731 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7732 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7733 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7734 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7735 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7736 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7737 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7738 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
7739 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7740 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7741 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
7742 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
7743 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7744 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7745 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7746 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7747 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7748 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
7749 // CHECK8-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
7750 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
7751 // CHECK8:       omp.dispatch.cond:
7752 // CHECK8-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
7753 // CHECK8-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
7754 // CHECK8-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7755 // CHECK8:       omp.dispatch.body:
7756 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7757 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
7758 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7759 // CHECK8:       omp.inner.for.cond:
7760 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7761 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
7762 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
7763 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7764 // CHECK8:       omp.inner.for.body:
7765 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7766 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
7767 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7768 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
7769 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
7770 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !33
7771 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
7772 // CHECK8-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
7773 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7774 // CHECK8:       omp.body.continue:
7775 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7776 // CHECK8:       omp.inner.for.inc:
7777 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7778 // CHECK8-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
7779 // CHECK8-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7780 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
7781 // CHECK8:       omp.inner.for.end:
7782 // CHECK8-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
7783 // CHECK8:       omp.dispatch.inc:
7784 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND]]
7785 // CHECK8:       omp.dispatch.end:
7786 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7787 // CHECK8-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7788 // CHECK8-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7789 // CHECK8:       .omp.final.then:
7790 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7791 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7792 // CHECK8:       .omp.final.done:
7793 // CHECK8-NEXT:    ret void
7794 //
7795 //
7796 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
7797 // CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7798 // CHECK8-NEXT:  entry:
7799 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7800 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7801 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7802 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
7803 // CHECK8-NEXT:    ret void
7804 //
7805 //
7806 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..14
7807 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7808 // CHECK8-NEXT:  entry:
7809 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7810 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7811 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7812 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7813 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7814 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7815 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7816 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7817 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7818 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7819 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7820 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7821 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7822 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7823 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7824 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
7825 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7826 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7827 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7828 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
7829 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7830 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7831 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
7832 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7833 // CHECK8:       cond.true:
7834 // CHECK8-NEXT:    br label [[COND_END:%.*]]
7835 // CHECK8:       cond.false:
7836 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7837 // CHECK8-NEXT:    br label [[COND_END]]
7838 // CHECK8:       cond.end:
7839 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7840 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7841 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7842 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7843 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7844 // CHECK8:       omp.inner.for.cond:
7845 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
7846 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
7847 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7848 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7849 // CHECK8:       omp.inner.for.body:
7850 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !36
7851 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36
7852 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]]), !llvm.access.group !36
7853 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7854 // CHECK8:       omp.inner.for.inc:
7855 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
7856 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !36
7857 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
7858 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
7859 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
7860 // CHECK8:       omp.inner.for.end:
7861 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7862 // CHECK8:       omp.loop.exit:
7863 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
7864 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7865 // CHECK8-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
7866 // CHECK8-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7867 // CHECK8:       .omp.final.then:
7868 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7869 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7870 // CHECK8:       .omp.final.done:
7871 // CHECK8-NEXT:    ret void
7872 //
7873 //
7874 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..15
7875 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
7876 // CHECK8-NEXT:  entry:
7877 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7878 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7879 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7880 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7881 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
7882 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7883 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7884 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7885 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7886 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7887 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7888 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
7889 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7890 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7891 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7892 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7893 // CHECK8-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
7894 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
7895 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7896 // CHECK8-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
7897 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7898 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7899 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
7900 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
7901 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7902 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7903 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7904 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7905 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7906 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
7907 // CHECK8-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
7908 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
7909 // CHECK8:       omp.dispatch.cond:
7910 // CHECK8-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
7911 // CHECK8-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
7912 // CHECK8-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7913 // CHECK8:       omp.dispatch.body:
7914 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7915 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
7916 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7917 // CHECK8:       omp.inner.for.cond:
7918 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
7919 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
7920 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
7921 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7922 // CHECK8:       omp.inner.for.body:
7923 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
7924 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
7925 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7926 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
7927 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
7928 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !39
7929 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
7930 // CHECK8-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !39
7931 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7932 // CHECK8:       omp.body.continue:
7933 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7934 // CHECK8:       omp.inner.for.inc:
7935 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
7936 // CHECK8-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
7937 // CHECK8-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
7938 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
7939 // CHECK8:       omp.inner.for.end:
7940 // CHECK8-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
7941 // CHECK8:       omp.dispatch.inc:
7942 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND]]
7943 // CHECK8:       omp.dispatch.end:
7944 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7945 // CHECK8-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7946 // CHECK8-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7947 // CHECK8:       .omp.final.then:
7948 // CHECK8-NEXT:    store i32 123, i32* [[I]], align 4
7949 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7950 // CHECK8:       .omp.final.done:
7951 // CHECK8-NEXT:    ret void
7952 //
7953 //
7954 // CHECK8-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
7955 // CHECK8-SAME: () #[[ATTR3:[0-9]+]] {
7956 // CHECK8-NEXT:  entry:
7957 // CHECK8-NEXT:    call void @__tgt_register_requires(i64 1)
7958 // CHECK8-NEXT:    ret void
7959 //
7960 //
7961 // CHECK9-LABEL: define {{[^@]+}}@_Z21teams_template_structv
7962 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
7963 // CHECK9-NEXT:  entry:
7964 // CHECK9-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
7965 // CHECK9-NEXT:    [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
7966 // CHECK9-NEXT:    ret i32 [[CALL]]
7967 //
7968 //
7969 // CHECK9-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
7970 // CHECK9-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
7971 // CHECK9-NEXT:  entry:
7972 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
7973 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7974 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7975 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7976 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7977 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7978 // CHECK9-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
7979 // CHECK9-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
7980 // CHECK9-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
7981 // CHECK9-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
7982 // CHECK9-NEXT:    [[I7:%.*]] = alloca i32, align 4
7983 // CHECK9-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
7984 // CHECK9-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
7985 // CHECK9-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
7986 // CHECK9-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
7987 // CHECK9-NEXT:    [[I24:%.*]] = alloca i32, align 4
7988 // CHECK9-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
7989 // CHECK9-NEXT:    [[DOTOMP_LB38:%.*]] = alloca i32, align 4
7990 // CHECK9-NEXT:    [[DOTOMP_UB39:%.*]] = alloca i32, align 4
7991 // CHECK9-NEXT:    [[DOTOMP_IV40:%.*]] = alloca i32, align 4
7992 // CHECK9-NEXT:    [[I41:%.*]] = alloca i32, align 4
7993 // CHECK9-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
7994 // CHECK9-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
7995 // CHECK9-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
7996 // CHECK9-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
7997 // CHECK9-NEXT:    [[I58:%.*]] = alloca i32, align 4
7998 // CHECK9-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
7999 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
8000 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8001 // CHECK9-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
8002 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8003 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
8004 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8005 // CHECK9:       omp.inner.for.cond:
8006 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8007 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
8008 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
8009 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8010 // CHECK9:       omp.inner.for.body:
8011 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8012 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
8013 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8014 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
8015 // CHECK9-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
8016 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
8017 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
8018 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
8019 // CHECK9-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2
8020 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8021 // CHECK9:       omp.body.continue:
8022 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8023 // CHECK9:       omp.inner.for.inc:
8024 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8025 // CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
8026 // CHECK9-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8027 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
8028 // CHECK9:       omp.inner.for.end:
8029 // CHECK9-NEXT:    store i32 123, i32* [[I]], align 4
8030 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
8031 // CHECK9-NEXT:    store i32 122, i32* [[DOTOMP_UB5]], align 4
8032 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
8033 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4
8034 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
8035 // CHECK9:       omp.inner.for.cond8:
8036 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8037 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
8038 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
8039 // CHECK9-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
8040 // CHECK9:       omp.inner.for.body10:
8041 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8042 // CHECK9-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
8043 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8044 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !6
8045 // CHECK9-NEXT:    [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8046 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !6
8047 // CHECK9-NEXT:    [[IDXPROM14:%.*]] = sext i32 [[TMP10]] to i64
8048 // CHECK9-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i64 0, i64 [[IDXPROM14]]
8049 // CHECK9-NEXT:    store i32 0, i32* [[ARRAYIDX15]], align 4, !llvm.access.group !6
8050 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
8051 // CHECK9:       omp.body.continue16:
8052 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
8053 // CHECK9:       omp.inner.for.inc17:
8054 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8055 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1
8056 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8057 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
8058 // CHECK9:       omp.inner.for.end19:
8059 // CHECK9-NEXT:    store i32 123, i32* [[I7]], align 4
8060 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
8061 // CHECK9-NEXT:    store i32 122, i32* [[DOTOMP_UB22]], align 4
8062 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
8063 // CHECK9-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV23]], align 4
8064 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
8065 // CHECK9:       omp.inner.for.cond25:
8066 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8067 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9
8068 // CHECK9-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
8069 // CHECK9-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END36:%.*]]
8070 // CHECK9:       omp.inner.for.body27:
8071 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8072 // CHECK9-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP15]], 1
8073 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
8074 // CHECK9-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9
8075 // CHECK9-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8076 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I24]], align 4, !llvm.access.group !9
8077 // CHECK9-NEXT:    [[IDXPROM31:%.*]] = sext i32 [[TMP16]] to i64
8078 // CHECK9-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 [[IDXPROM31]]
8079 // CHECK9-NEXT:    store i32 0, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !9
8080 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE33:%.*]]
8081 // CHECK9:       omp.body.continue33:
8082 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC34:%.*]]
8083 // CHECK9:       omp.inner.for.inc34:
8084 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8085 // CHECK9-NEXT:    [[ADD35:%.*]] = add nsw i32 [[TMP17]], 1
8086 // CHECK9-NEXT:    store i32 [[ADD35]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8087 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
8088 // CHECK9:       omp.inner.for.end36:
8089 // CHECK9-NEXT:    store i32 123, i32* [[I24]], align 4
8090 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB38]], align 4
8091 // CHECK9-NEXT:    store i32 122, i32* [[DOTOMP_UB39]], align 4
8092 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB38]], align 4
8093 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV40]], align 4
8094 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND42:%.*]]
8095 // CHECK9:       omp.inner.for.cond42:
8096 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8097 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB39]], align 4, !llvm.access.group !12
8098 // CHECK9-NEXT:    [[CMP43:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
8099 // CHECK9-NEXT:    br i1 [[CMP43]], label [[OMP_INNER_FOR_BODY44:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
8100 // CHECK9:       omp.inner.for.body44:
8101 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8102 // CHECK9-NEXT:    [[MUL45:%.*]] = mul nsw i32 [[TMP21]], 1
8103 // CHECK9-NEXT:    [[ADD46:%.*]] = add nsw i32 0, [[MUL45]]
8104 // CHECK9-NEXT:    store i32 [[ADD46]], i32* [[I41]], align 4, !llvm.access.group !12
8105 // CHECK9-NEXT:    [[A47:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8106 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I41]], align 4, !llvm.access.group !12
8107 // CHECK9-NEXT:    [[IDXPROM48:%.*]] = sext i32 [[TMP22]] to i64
8108 // CHECK9-NEXT:    [[ARRAYIDX49:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A47]], i64 0, i64 [[IDXPROM48]]
8109 // CHECK9-NEXT:    store i32 0, i32* [[ARRAYIDX49]], align 4, !llvm.access.group !12
8110 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
8111 // CHECK9:       omp.body.continue50:
8112 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
8113 // CHECK9:       omp.inner.for.inc51:
8114 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8115 // CHECK9-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP23]], 1
8116 // CHECK9-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8117 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND42]], !llvm.loop [[LOOP13:![0-9]+]]
8118 // CHECK9:       omp.inner.for.end53:
8119 // CHECK9-NEXT:    store i32 123, i32* [[I41]], align 4
8120 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
8121 // CHECK9-NEXT:    store i32 122, i32* [[DOTOMP_UB56]], align 4
8122 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
8123 // CHECK9-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV57]], align 4
8124 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
8125 // CHECK9:       omp.inner.for.cond59:
8126 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8127 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !15
8128 // CHECK9-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
8129 // CHECK9-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END70:%.*]]
8130 // CHECK9:       omp.inner.for.body61:
8131 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8132 // CHECK9-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP27]], 1
8133 // CHECK9-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
8134 // CHECK9-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !15
8135 // CHECK9-NEXT:    [[A64:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8136 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I58]], align 4, !llvm.access.group !15
8137 // CHECK9-NEXT:    [[IDXPROM65:%.*]] = sext i32 [[TMP28]] to i64
8138 // CHECK9-NEXT:    [[ARRAYIDX66:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A64]], i64 0, i64 [[IDXPROM65]]
8139 // CHECK9-NEXT:    store i32 0, i32* [[ARRAYIDX66]], align 4, !llvm.access.group !15
8140 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE67:%.*]]
8141 // CHECK9:       omp.body.continue67:
8142 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC68:%.*]]
8143 // CHECK9:       omp.inner.for.inc68:
8144 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8145 // CHECK9-NEXT:    [[ADD69:%.*]] = add nsw i32 [[TMP29]], 1
8146 // CHECK9-NEXT:    store i32 [[ADD69]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8147 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
8148 // CHECK9:       omp.inner.for.end70:
8149 // CHECK9-NEXT:    store i32 123, i32* [[I58]], align 4
8150 // CHECK9-NEXT:    [[A71:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8151 // CHECK9-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A71]], i64 0, i64 0
8152 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX72]], align 4
8153 // CHECK9-NEXT:    ret i32 [[TMP30]]
8154 //
8155 //
8156 // CHECK10-LABEL: define {{[^@]+}}@_Z21teams_template_structv
8157 // CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
8158 // CHECK10-NEXT:  entry:
8159 // CHECK10-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
8160 // CHECK10-NEXT:    [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
8161 // CHECK10-NEXT:    ret i32 [[CALL]]
8162 //
8163 //
8164 // CHECK10-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
8165 // CHECK10-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
8166 // CHECK10-NEXT:  entry:
8167 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
8168 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8169 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8170 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8171 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8172 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8173 // CHECK10-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
8174 // CHECK10-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
8175 // CHECK10-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
8176 // CHECK10-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
8177 // CHECK10-NEXT:    [[I7:%.*]] = alloca i32, align 4
8178 // CHECK10-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
8179 // CHECK10-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
8180 // CHECK10-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
8181 // CHECK10-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
8182 // CHECK10-NEXT:    [[I24:%.*]] = alloca i32, align 4
8183 // CHECK10-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
8184 // CHECK10-NEXT:    [[DOTOMP_LB38:%.*]] = alloca i32, align 4
8185 // CHECK10-NEXT:    [[DOTOMP_UB39:%.*]] = alloca i32, align 4
8186 // CHECK10-NEXT:    [[DOTOMP_IV40:%.*]] = alloca i32, align 4
8187 // CHECK10-NEXT:    [[I41:%.*]] = alloca i32, align 4
8188 // CHECK10-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
8189 // CHECK10-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
8190 // CHECK10-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
8191 // CHECK10-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
8192 // CHECK10-NEXT:    [[I58:%.*]] = alloca i32, align 4
8193 // CHECK10-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
8194 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
8195 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8196 // CHECK10-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
8197 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8198 // CHECK10-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
8199 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8200 // CHECK10:       omp.inner.for.cond:
8201 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8202 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
8203 // CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
8204 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8205 // CHECK10:       omp.inner.for.body:
8206 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8207 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
8208 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8209 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
8210 // CHECK10-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
8211 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
8212 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
8213 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
8214 // CHECK10-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2
8215 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8216 // CHECK10:       omp.body.continue:
8217 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8218 // CHECK10:       omp.inner.for.inc:
8219 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8220 // CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
8221 // CHECK10-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
8222 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
8223 // CHECK10:       omp.inner.for.end:
8224 // CHECK10-NEXT:    store i32 123, i32* [[I]], align 4
8225 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
8226 // CHECK10-NEXT:    store i32 122, i32* [[DOTOMP_UB5]], align 4
8227 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
8228 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4
8229 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
8230 // CHECK10:       omp.inner.for.cond8:
8231 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8232 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
8233 // CHECK10-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
8234 // CHECK10-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
8235 // CHECK10:       omp.inner.for.body10:
8236 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8237 // CHECK10-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
8238 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8239 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !6
8240 // CHECK10-NEXT:    [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8241 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !6
8242 // CHECK10-NEXT:    [[IDXPROM14:%.*]] = sext i32 [[TMP10]] to i64
8243 // CHECK10-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i64 0, i64 [[IDXPROM14]]
8244 // CHECK10-NEXT:    store i32 0, i32* [[ARRAYIDX15]], align 4, !llvm.access.group !6
8245 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
8246 // CHECK10:       omp.body.continue16:
8247 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
8248 // CHECK10:       omp.inner.for.inc17:
8249 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8250 // CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1
8251 // CHECK10-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
8252 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
8253 // CHECK10:       omp.inner.for.end19:
8254 // CHECK10-NEXT:    store i32 123, i32* [[I7]], align 4
8255 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
8256 // CHECK10-NEXT:    store i32 122, i32* [[DOTOMP_UB22]], align 4
8257 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
8258 // CHECK10-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV23]], align 4
8259 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
8260 // CHECK10:       omp.inner.for.cond25:
8261 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8262 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9
8263 // CHECK10-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
8264 // CHECK10-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END36:%.*]]
8265 // CHECK10:       omp.inner.for.body27:
8266 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8267 // CHECK10-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP15]], 1
8268 // CHECK10-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
8269 // CHECK10-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9
8270 // CHECK10-NEXT:    [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8271 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I24]], align 4, !llvm.access.group !9
8272 // CHECK10-NEXT:    [[IDXPROM31:%.*]] = sext i32 [[TMP16]] to i64
8273 // CHECK10-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 [[IDXPROM31]]
8274 // CHECK10-NEXT:    store i32 0, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !9
8275 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE33:%.*]]
8276 // CHECK10:       omp.body.continue33:
8277 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC34:%.*]]
8278 // CHECK10:       omp.inner.for.inc34:
8279 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8280 // CHECK10-NEXT:    [[ADD35:%.*]] = add nsw i32 [[TMP17]], 1
8281 // CHECK10-NEXT:    store i32 [[ADD35]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
8282 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
8283 // CHECK10:       omp.inner.for.end36:
8284 // CHECK10-NEXT:    store i32 123, i32* [[I24]], align 4
8285 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB38]], align 4
8286 // CHECK10-NEXT:    store i32 122, i32* [[DOTOMP_UB39]], align 4
8287 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB38]], align 4
8288 // CHECK10-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV40]], align 4
8289 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND42:%.*]]
8290 // CHECK10:       omp.inner.for.cond42:
8291 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8292 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB39]], align 4, !llvm.access.group !12
8293 // CHECK10-NEXT:    [[CMP43:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
8294 // CHECK10-NEXT:    br i1 [[CMP43]], label [[OMP_INNER_FOR_BODY44:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
8295 // CHECK10:       omp.inner.for.body44:
8296 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8297 // CHECK10-NEXT:    [[MUL45:%.*]] = mul nsw i32 [[TMP21]], 1
8298 // CHECK10-NEXT:    [[ADD46:%.*]] = add nsw i32 0, [[MUL45]]
8299 // CHECK10-NEXT:    store i32 [[ADD46]], i32* [[I41]], align 4, !llvm.access.group !12
8300 // CHECK10-NEXT:    [[A47:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8301 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I41]], align 4, !llvm.access.group !12
8302 // CHECK10-NEXT:    [[IDXPROM48:%.*]] = sext i32 [[TMP22]] to i64
8303 // CHECK10-NEXT:    [[ARRAYIDX49:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A47]], i64 0, i64 [[IDXPROM48]]
8304 // CHECK10-NEXT:    store i32 0, i32* [[ARRAYIDX49]], align 4, !llvm.access.group !12
8305 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
8306 // CHECK10:       omp.body.continue50:
8307 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
8308 // CHECK10:       omp.inner.for.inc51:
8309 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8310 // CHECK10-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP23]], 1
8311 // CHECK10-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV40]], align 4, !llvm.access.group !12
8312 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND42]], !llvm.loop [[LOOP13:![0-9]+]]
8313 // CHECK10:       omp.inner.for.end53:
8314 // CHECK10-NEXT:    store i32 123, i32* [[I41]], align 4
8315 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
8316 // CHECK10-NEXT:    store i32 122, i32* [[DOTOMP_UB56]], align 4
8317 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
8318 // CHECK10-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV57]], align 4
8319 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
8320 // CHECK10:       omp.inner.for.cond59:
8321 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8322 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !15
8323 // CHECK10-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
8324 // CHECK10-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END70:%.*]]
8325 // CHECK10:       omp.inner.for.body61:
8326 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8327 // CHECK10-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP27]], 1
8328 // CHECK10-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
8329 // CHECK10-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !15
8330 // CHECK10-NEXT:    [[A64:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8331 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I58]], align 4, !llvm.access.group !15
8332 // CHECK10-NEXT:    [[IDXPROM65:%.*]] = sext i32 [[TMP28]] to i64
8333 // CHECK10-NEXT:    [[ARRAYIDX66:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A64]], i64 0, i64 [[IDXPROM65]]
8334 // CHECK10-NEXT:    store i32 0, i32* [[ARRAYIDX66]], align 4, !llvm.access.group !15
8335 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE67:%.*]]
8336 // CHECK10:       omp.body.continue67:
8337 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC68:%.*]]
8338 // CHECK10:       omp.inner.for.inc68:
8339 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8340 // CHECK10-NEXT:    [[ADD69:%.*]] = add nsw i32 [[TMP29]], 1
8341 // CHECK10-NEXT:    store i32 [[ADD69]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
8342 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
8343 // CHECK10:       omp.inner.for.end70:
8344 // CHECK10-NEXT:    store i32 123, i32* [[I58]], align 4
8345 // CHECK10-NEXT:    [[A71:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8346 // CHECK10-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A71]], i64 0, i64 0
8347 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX72]], align 4
8348 // CHECK10-NEXT:    ret i32 [[TMP30]]
8349 //
8350 //
8351 // CHECK11-LABEL: define {{[^@]+}}@_Z21teams_template_structv
8352 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
8353 // CHECK11-NEXT:  entry:
8354 // CHECK11-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
8355 // CHECK11-NEXT:    [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
8356 // CHECK11-NEXT:    ret i32 [[CALL]]
8357 //
8358 //
8359 // CHECK11-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
8360 // CHECK11-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
8361 // CHECK11-NEXT:  entry:
8362 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
8363 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8364 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8365 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8366 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8367 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
8368 // CHECK11-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
8369 // CHECK11-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
8370 // CHECK11-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
8371 // CHECK11-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
8372 // CHECK11-NEXT:    [[I7:%.*]] = alloca i32, align 4
8373 // CHECK11-NEXT:    [[_TMP19:%.*]] = alloca i32, align 4
8374 // CHECK11-NEXT:    [[DOTOMP_LB20:%.*]] = alloca i32, align 4
8375 // CHECK11-NEXT:    [[DOTOMP_UB21:%.*]] = alloca i32, align 4
8376 // CHECK11-NEXT:    [[DOTOMP_IV22:%.*]] = alloca i32, align 4
8377 // CHECK11-NEXT:    [[I23:%.*]] = alloca i32, align 4
8378 // CHECK11-NEXT:    [[_TMP35:%.*]] = alloca i32, align 4
8379 // CHECK11-NEXT:    [[DOTOMP_LB36:%.*]] = alloca i32, align 4
8380 // CHECK11-NEXT:    [[DOTOMP_UB37:%.*]] = alloca i32, align 4
8381 // CHECK11-NEXT:    [[DOTOMP_IV38:%.*]] = alloca i32, align 4
8382 // CHECK11-NEXT:    [[I39:%.*]] = alloca i32, align 4
8383 // CHECK11-NEXT:    [[_TMP51:%.*]] = alloca i32, align 4
8384 // CHECK11-NEXT:    [[DOTOMP_LB52:%.*]] = alloca i32, align 4
8385 // CHECK11-NEXT:    [[DOTOMP_UB53:%.*]] = alloca i32, align 4
8386 // CHECK11-NEXT:    [[DOTOMP_IV54:%.*]] = alloca i32, align 4
8387 // CHECK11-NEXT:    [[I55:%.*]] = alloca i32, align 4
8388 // CHECK11-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
8389 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
8390 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8391 // CHECK11-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
8392 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8393 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
8394 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8395 // CHECK11:       omp.inner.for.cond:
8396 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8397 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
8398 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
8399 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8400 // CHECK11:       omp.inner.for.body:
8401 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8402 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
8403 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8404 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
8405 // CHECK11-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
8406 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
8407 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP4]]
8408 // CHECK11-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3
8409 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8410 // CHECK11:       omp.body.continue:
8411 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8412 // CHECK11:       omp.inner.for.inc:
8413 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8414 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
8415 // CHECK11-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8416 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
8417 // CHECK11:       omp.inner.for.end:
8418 // CHECK11-NEXT:    store i32 123, i32* [[I]], align 4
8419 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
8420 // CHECK11-NEXT:    store i32 122, i32* [[DOTOMP_UB5]], align 4
8421 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
8422 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4
8423 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
8424 // CHECK11:       omp.inner.for.cond8:
8425 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8426 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
8427 // CHECK11-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
8428 // CHECK11-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]]
8429 // CHECK11:       omp.inner.for.body10:
8430 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8431 // CHECK11-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
8432 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8433 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !7
8434 // CHECK11-NEXT:    [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8435 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !7
8436 // CHECK11-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i32 0, i32 [[TMP10]]
8437 // CHECK11-NEXT:    store i32 0, i32* [[ARRAYIDX14]], align 4, !llvm.access.group !7
8438 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE15:%.*]]
8439 // CHECK11:       omp.body.continue15:
8440 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC16:%.*]]
8441 // CHECK11:       omp.inner.for.inc16:
8442 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8443 // CHECK11-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP11]], 1
8444 // CHECK11-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8445 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]]
8446 // CHECK11:       omp.inner.for.end18:
8447 // CHECK11-NEXT:    store i32 123, i32* [[I7]], align 4
8448 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB20]], align 4
8449 // CHECK11-NEXT:    store i32 122, i32* [[DOTOMP_UB21]], align 4
8450 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB20]], align 4
8451 // CHECK11-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV22]], align 4
8452 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND24:%.*]]
8453 // CHECK11:       omp.inner.for.cond24:
8454 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8455 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB21]], align 4, !llvm.access.group !10
8456 // CHECK11-NEXT:    [[CMP25:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
8457 // CHECK11-NEXT:    br i1 [[CMP25]], label [[OMP_INNER_FOR_BODY26:%.*]], label [[OMP_INNER_FOR_END34:%.*]]
8458 // CHECK11:       omp.inner.for.body26:
8459 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8460 // CHECK11-NEXT:    [[MUL27:%.*]] = mul nsw i32 [[TMP15]], 1
8461 // CHECK11-NEXT:    [[ADD28:%.*]] = add nsw i32 0, [[MUL27]]
8462 // CHECK11-NEXT:    store i32 [[ADD28]], i32* [[I23]], align 4, !llvm.access.group !10
8463 // CHECK11-NEXT:    [[A29:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8464 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I23]], align 4, !llvm.access.group !10
8465 // CHECK11-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A29]], i32 0, i32 [[TMP16]]
8466 // CHECK11-NEXT:    store i32 0, i32* [[ARRAYIDX30]], align 4, !llvm.access.group !10
8467 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE31:%.*]]
8468 // CHECK11:       omp.body.continue31:
8469 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC32:%.*]]
8470 // CHECK11:       omp.inner.for.inc32:
8471 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8472 // CHECK11-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP17]], 1
8473 // CHECK11-NEXT:    store i32 [[ADD33]], i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8474 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND24]], !llvm.loop [[LOOP11:![0-9]+]]
8475 // CHECK11:       omp.inner.for.end34:
8476 // CHECK11-NEXT:    store i32 123, i32* [[I23]], align 4
8477 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB36]], align 4
8478 // CHECK11-NEXT:    store i32 122, i32* [[DOTOMP_UB37]], align 4
8479 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB36]], align 4
8480 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV38]], align 4
8481 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND40:%.*]]
8482 // CHECK11:       omp.inner.for.cond40:
8483 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8484 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB37]], align 4, !llvm.access.group !13
8485 // CHECK11-NEXT:    [[CMP41:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
8486 // CHECK11-NEXT:    br i1 [[CMP41]], label [[OMP_INNER_FOR_BODY42:%.*]], label [[OMP_INNER_FOR_END50:%.*]]
8487 // CHECK11:       omp.inner.for.body42:
8488 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8489 // CHECK11-NEXT:    [[MUL43:%.*]] = mul nsw i32 [[TMP21]], 1
8490 // CHECK11-NEXT:    [[ADD44:%.*]] = add nsw i32 0, [[MUL43]]
8491 // CHECK11-NEXT:    store i32 [[ADD44]], i32* [[I39]], align 4, !llvm.access.group !13
8492 // CHECK11-NEXT:    [[A45:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8493 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I39]], align 4, !llvm.access.group !13
8494 // CHECK11-NEXT:    [[ARRAYIDX46:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A45]], i32 0, i32 [[TMP22]]
8495 // CHECK11-NEXT:    store i32 0, i32* [[ARRAYIDX46]], align 4, !llvm.access.group !13
8496 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE47:%.*]]
8497 // CHECK11:       omp.body.continue47:
8498 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC48:%.*]]
8499 // CHECK11:       omp.inner.for.inc48:
8500 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8501 // CHECK11-NEXT:    [[ADD49:%.*]] = add nsw i32 [[TMP23]], 1
8502 // CHECK11-NEXT:    store i32 [[ADD49]], i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8503 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND40]], !llvm.loop [[LOOP14:![0-9]+]]
8504 // CHECK11:       omp.inner.for.end50:
8505 // CHECK11-NEXT:    store i32 123, i32* [[I39]], align 4
8506 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB52]], align 4
8507 // CHECK11-NEXT:    store i32 122, i32* [[DOTOMP_UB53]], align 4
8508 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB52]], align 4
8509 // CHECK11-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV54]], align 4
8510 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND56:%.*]]
8511 // CHECK11:       omp.inner.for.cond56:
8512 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8513 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB53]], align 4, !llvm.access.group !16
8514 // CHECK11-NEXT:    [[CMP57:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
8515 // CHECK11-NEXT:    br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
8516 // CHECK11:       omp.inner.for.body58:
8517 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8518 // CHECK11-NEXT:    [[MUL59:%.*]] = mul nsw i32 [[TMP27]], 1
8519 // CHECK11-NEXT:    [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
8520 // CHECK11-NEXT:    store i32 [[ADD60]], i32* [[I55]], align 4, !llvm.access.group !16
8521 // CHECK11-NEXT:    [[A61:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8522 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I55]], align 4, !llvm.access.group !16
8523 // CHECK11-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A61]], i32 0, i32 [[TMP28]]
8524 // CHECK11-NEXT:    store i32 0, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !16
8525 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE63:%.*]]
8526 // CHECK11:       omp.body.continue63:
8527 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC64:%.*]]
8528 // CHECK11:       omp.inner.for.inc64:
8529 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8530 // CHECK11-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP29]], 1
8531 // CHECK11-NEXT:    store i32 [[ADD65]], i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8532 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP17:![0-9]+]]
8533 // CHECK11:       omp.inner.for.end66:
8534 // CHECK11-NEXT:    store i32 123, i32* [[I55]], align 4
8535 // CHECK11-NEXT:    [[A67:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8536 // CHECK11-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A67]], i32 0, i32 0
8537 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4
8538 // CHECK11-NEXT:    ret i32 [[TMP30]]
8539 //
8540 //
8541 // CHECK12-LABEL: define {{[^@]+}}@_Z21teams_template_structv
8542 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
8543 // CHECK12-NEXT:  entry:
8544 // CHECK12-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
8545 // CHECK12-NEXT:    [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
8546 // CHECK12-NEXT:    ret i32 [[CALL]]
8547 //
8548 //
8549 // CHECK12-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
8550 // CHECK12-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
8551 // CHECK12-NEXT:  entry:
8552 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
8553 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8554 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8555 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8556 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8557 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
8558 // CHECK12-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
8559 // CHECK12-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
8560 // CHECK12-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
8561 // CHECK12-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
8562 // CHECK12-NEXT:    [[I7:%.*]] = alloca i32, align 4
8563 // CHECK12-NEXT:    [[_TMP19:%.*]] = alloca i32, align 4
8564 // CHECK12-NEXT:    [[DOTOMP_LB20:%.*]] = alloca i32, align 4
8565 // CHECK12-NEXT:    [[DOTOMP_UB21:%.*]] = alloca i32, align 4
8566 // CHECK12-NEXT:    [[DOTOMP_IV22:%.*]] = alloca i32, align 4
8567 // CHECK12-NEXT:    [[I23:%.*]] = alloca i32, align 4
8568 // CHECK12-NEXT:    [[_TMP35:%.*]] = alloca i32, align 4
8569 // CHECK12-NEXT:    [[DOTOMP_LB36:%.*]] = alloca i32, align 4
8570 // CHECK12-NEXT:    [[DOTOMP_UB37:%.*]] = alloca i32, align 4
8571 // CHECK12-NEXT:    [[DOTOMP_IV38:%.*]] = alloca i32, align 4
8572 // CHECK12-NEXT:    [[I39:%.*]] = alloca i32, align 4
8573 // CHECK12-NEXT:    [[_TMP51:%.*]] = alloca i32, align 4
8574 // CHECK12-NEXT:    [[DOTOMP_LB52:%.*]] = alloca i32, align 4
8575 // CHECK12-NEXT:    [[DOTOMP_UB53:%.*]] = alloca i32, align 4
8576 // CHECK12-NEXT:    [[DOTOMP_IV54:%.*]] = alloca i32, align 4
8577 // CHECK12-NEXT:    [[I55:%.*]] = alloca i32, align 4
8578 // CHECK12-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
8579 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
8580 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8581 // CHECK12-NEXT:    store i32 122, i32* [[DOTOMP_UB]], align 4
8582 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8583 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
8584 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8585 // CHECK12:       omp.inner.for.cond:
8586 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8587 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
8588 // CHECK12-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
8589 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8590 // CHECK12:       omp.inner.for.body:
8591 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8592 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
8593 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8594 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
8595 // CHECK12-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
8596 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
8597 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP4]]
8598 // CHECK12-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3
8599 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8600 // CHECK12:       omp.body.continue:
8601 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8602 // CHECK12:       omp.inner.for.inc:
8603 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8604 // CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
8605 // CHECK12-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
8606 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
8607 // CHECK12:       omp.inner.for.end:
8608 // CHECK12-NEXT:    store i32 123, i32* [[I]], align 4
8609 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
8610 // CHECK12-NEXT:    store i32 122, i32* [[DOTOMP_UB5]], align 4
8611 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
8612 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4
8613 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
8614 // CHECK12:       omp.inner.for.cond8:
8615 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8616 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
8617 // CHECK12-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
8618 // CHECK12-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]]
8619 // CHECK12:       omp.inner.for.body10:
8620 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8621 // CHECK12-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
8622 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8623 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !7
8624 // CHECK12-NEXT:    [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8625 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !7
8626 // CHECK12-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i32 0, i32 [[TMP10]]
8627 // CHECK12-NEXT:    store i32 0, i32* [[ARRAYIDX14]], align 4, !llvm.access.group !7
8628 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE15:%.*]]
8629 // CHECK12:       omp.body.continue15:
8630 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC16:%.*]]
8631 // CHECK12:       omp.inner.for.inc16:
8632 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8633 // CHECK12-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP11]], 1
8634 // CHECK12-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
8635 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]]
8636 // CHECK12:       omp.inner.for.end18:
8637 // CHECK12-NEXT:    store i32 123, i32* [[I7]], align 4
8638 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB20]], align 4
8639 // CHECK12-NEXT:    store i32 122, i32* [[DOTOMP_UB21]], align 4
8640 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB20]], align 4
8641 // CHECK12-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV22]], align 4
8642 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND24:%.*]]
8643 // CHECK12:       omp.inner.for.cond24:
8644 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8645 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB21]], align 4, !llvm.access.group !10
8646 // CHECK12-NEXT:    [[CMP25:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
8647 // CHECK12-NEXT:    br i1 [[CMP25]], label [[OMP_INNER_FOR_BODY26:%.*]], label [[OMP_INNER_FOR_END34:%.*]]
8648 // CHECK12:       omp.inner.for.body26:
8649 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8650 // CHECK12-NEXT:    [[MUL27:%.*]] = mul nsw i32 [[TMP15]], 1
8651 // CHECK12-NEXT:    [[ADD28:%.*]] = add nsw i32 0, [[MUL27]]
8652 // CHECK12-NEXT:    store i32 [[ADD28]], i32* [[I23]], align 4, !llvm.access.group !10
8653 // CHECK12-NEXT:    [[A29:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8654 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I23]], align 4, !llvm.access.group !10
8655 // CHECK12-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A29]], i32 0, i32 [[TMP16]]
8656 // CHECK12-NEXT:    store i32 0, i32* [[ARRAYIDX30]], align 4, !llvm.access.group !10
8657 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE31:%.*]]
8658 // CHECK12:       omp.body.continue31:
8659 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC32:%.*]]
8660 // CHECK12:       omp.inner.for.inc32:
8661 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8662 // CHECK12-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP17]], 1
8663 // CHECK12-NEXT:    store i32 [[ADD33]], i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10
8664 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND24]], !llvm.loop [[LOOP11:![0-9]+]]
8665 // CHECK12:       omp.inner.for.end34:
8666 // CHECK12-NEXT:    store i32 123, i32* [[I23]], align 4
8667 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB36]], align 4
8668 // CHECK12-NEXT:    store i32 122, i32* [[DOTOMP_UB37]], align 4
8669 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB36]], align 4
8670 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV38]], align 4
8671 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND40:%.*]]
8672 // CHECK12:       omp.inner.for.cond40:
8673 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8674 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB37]], align 4, !llvm.access.group !13
8675 // CHECK12-NEXT:    [[CMP41:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
8676 // CHECK12-NEXT:    br i1 [[CMP41]], label [[OMP_INNER_FOR_BODY42:%.*]], label [[OMP_INNER_FOR_END50:%.*]]
8677 // CHECK12:       omp.inner.for.body42:
8678 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8679 // CHECK12-NEXT:    [[MUL43:%.*]] = mul nsw i32 [[TMP21]], 1
8680 // CHECK12-NEXT:    [[ADD44:%.*]] = add nsw i32 0, [[MUL43]]
8681 // CHECK12-NEXT:    store i32 [[ADD44]], i32* [[I39]], align 4, !llvm.access.group !13
8682 // CHECK12-NEXT:    [[A45:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8683 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I39]], align 4, !llvm.access.group !13
8684 // CHECK12-NEXT:    [[ARRAYIDX46:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A45]], i32 0, i32 [[TMP22]]
8685 // CHECK12-NEXT:    store i32 0, i32* [[ARRAYIDX46]], align 4, !llvm.access.group !13
8686 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE47:%.*]]
8687 // CHECK12:       omp.body.continue47:
8688 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC48:%.*]]
8689 // CHECK12:       omp.inner.for.inc48:
8690 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8691 // CHECK12-NEXT:    [[ADD49:%.*]] = add nsw i32 [[TMP23]], 1
8692 // CHECK12-NEXT:    store i32 [[ADD49]], i32* [[DOTOMP_IV38]], align 4, !llvm.access.group !13
8693 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND40]], !llvm.loop [[LOOP14:![0-9]+]]
8694 // CHECK12:       omp.inner.for.end50:
8695 // CHECK12-NEXT:    store i32 123, i32* [[I39]], align 4
8696 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB52]], align 4
8697 // CHECK12-NEXT:    store i32 122, i32* [[DOTOMP_UB53]], align 4
8698 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB52]], align 4
8699 // CHECK12-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV54]], align 4
8700 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND56:%.*]]
8701 // CHECK12:       omp.inner.for.cond56:
8702 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8703 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB53]], align 4, !llvm.access.group !16
8704 // CHECK12-NEXT:    [[CMP57:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
8705 // CHECK12-NEXT:    br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
8706 // CHECK12:       omp.inner.for.body58:
8707 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8708 // CHECK12-NEXT:    [[MUL59:%.*]] = mul nsw i32 [[TMP27]], 1
8709 // CHECK12-NEXT:    [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
8710 // CHECK12-NEXT:    store i32 [[ADD60]], i32* [[I55]], align 4, !llvm.access.group !16
8711 // CHECK12-NEXT:    [[A61:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8712 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I55]], align 4, !llvm.access.group !16
8713 // CHECK12-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A61]], i32 0, i32 [[TMP28]]
8714 // CHECK12-NEXT:    store i32 0, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !16
8715 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE63:%.*]]
8716 // CHECK12:       omp.body.continue63:
8717 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC64:%.*]]
8718 // CHECK12:       omp.inner.for.inc64:
8719 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8720 // CHECK12-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP29]], 1
8721 // CHECK12-NEXT:    store i32 [[ADD65]], i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !16
8722 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP17:![0-9]+]]
8723 // CHECK12:       omp.inner.for.end66:
8724 // CHECK12-NEXT:    store i32 123, i32* [[I55]], align 4
8725 // CHECK12-NEXT:    [[A67:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
8726 // CHECK12-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A67]], i32 0, i32 0
8727 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4
8728 // CHECK12-NEXT:    ret i32 [[TMP30]]
8729 //
8730 //
8731 // CHECK13-LABEL: define {{[^@]+}}@main
8732 // CHECK13-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
8733 // CHECK13-NEXT:  entry:
8734 // CHECK13-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8735 // CHECK13-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
8736 // CHECK13-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
8737 // CHECK13-NEXT:    [[N:%.*]] = alloca i32, align 4
8738 // CHECK13-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
8739 // CHECK13-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
8740 // CHECK13-NEXT:    [[M:%.*]] = alloca i32, align 4
8741 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8742 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
8743 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
8744 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
8745 // CHECK13-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
8746 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8747 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8748 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8749 // CHECK13-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
8750 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
8751 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
8752 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
8753 // CHECK13-NEXT:    [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
8754 // CHECK13-NEXT:    [[_TMP9:%.*]] = alloca i32, align 4
8755 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
8756 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
8757 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
8758 // CHECK13-NEXT:    [[N_CASTED19:%.*]] = alloca i64, align 8
8759 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8760 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
8761 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
8762 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
8763 // CHECK13-NEXT:    [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
8764 // CHECK13-NEXT:    [[_TMP26:%.*]] = alloca i32, align 4
8765 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
8766 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
8767 // CHECK13-NEXT:    [[N_CASTED35:%.*]] = alloca i64, align 8
8768 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
8769 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
8770 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
8771 // CHECK13-NEXT:    [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
8772 // CHECK13-NEXT:    [[_TMP41:%.*]] = alloca i32, align 4
8773 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
8774 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
8775 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
8776 // CHECK13-NEXT:    [[N_CASTED51:%.*]] = alloca i64, align 8
8777 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
8778 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
8779 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
8780 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
8781 // CHECK13-NEXT:    [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
8782 // CHECK13-NEXT:    [[_TMP59:%.*]] = alloca i32, align 4
8783 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
8784 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
8785 // CHECK13-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8786 // CHECK13-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
8787 // CHECK13-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
8788 // CHECK13-NEXT:    store i32 100, i32* [[N]], align 4
8789 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
8790 // CHECK13-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
8791 // CHECK13-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
8792 // CHECK13-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
8793 // CHECK13-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
8794 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
8795 // CHECK13-NEXT:    store i32 10, i32* [[M]], align 4
8796 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N]], align 4
8797 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8798 // CHECK13-NEXT:    store i32 [[TMP3]], i32* [[CONV]], align 4
8799 // CHECK13-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
8800 // CHECK13-NEXT:    [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
8801 // CHECK13-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8802 // CHECK13-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
8803 // CHECK13-NEXT:    store i64 [[TMP4]], i64* [[TMP7]], align 8
8804 // CHECK13-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8805 // CHECK13-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
8806 // CHECK13-NEXT:    store i64 [[TMP4]], i64* [[TMP9]], align 8
8807 // CHECK13-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
8808 // CHECK13-NEXT:    store i64 4, i64* [[TMP10]], align 8
8809 // CHECK13-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
8810 // CHECK13-NEXT:    store i8* null, i8** [[TMP11]], align 8
8811 // CHECK13-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
8812 // CHECK13-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
8813 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP13]], align 8
8814 // CHECK13-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
8815 // CHECK13-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
8816 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP15]], align 8
8817 // CHECK13-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
8818 // CHECK13-NEXT:    store i64 8, i64* [[TMP16]], align 8
8819 // CHECK13-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
8820 // CHECK13-NEXT:    store i8* null, i8** [[TMP17]], align 8
8821 // CHECK13-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
8822 // CHECK13-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
8823 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 8
8824 // CHECK13-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
8825 // CHECK13-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
8826 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 8
8827 // CHECK13-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
8828 // CHECK13-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
8829 // CHECK13-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
8830 // CHECK13-NEXT:    store i8* null, i8** [[TMP23]], align 8
8831 // CHECK13-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8832 // CHECK13-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8833 // CHECK13-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
8834 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
8835 // CHECK13-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
8836 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8837 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
8838 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8839 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8840 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8841 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8842 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
8843 // CHECK13-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
8844 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
8845 // CHECK13-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
8846 // CHECK13-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8847 // CHECK13-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
8848 // CHECK13:       omp_offload.failed:
8849 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
8850 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT]]
8851 // CHECK13:       omp_offload.cont:
8852 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
8853 // CHECK13-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
8854 // CHECK13-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
8855 // CHECK13-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
8856 // CHECK13-NEXT:    [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
8857 // CHECK13-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
8858 // CHECK13-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
8859 // CHECK13-NEXT:    store i64 [[TMP34]], i64* [[TMP37]], align 8
8860 // CHECK13-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
8861 // CHECK13-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
8862 // CHECK13-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
8863 // CHECK13-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
8864 // CHECK13-NEXT:    store i64 4, i64* [[TMP40]], align 8
8865 // CHECK13-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
8866 // CHECK13-NEXT:    store i8* null, i8** [[TMP41]], align 8
8867 // CHECK13-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
8868 // CHECK13-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
8869 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP43]], align 8
8870 // CHECK13-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
8871 // CHECK13-NEXT:    [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
8872 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP45]], align 8
8873 // CHECK13-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
8874 // CHECK13-NEXT:    store i64 8, i64* [[TMP46]], align 8
8875 // CHECK13-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
8876 // CHECK13-NEXT:    store i8* null, i8** [[TMP47]], align 8
8877 // CHECK13-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
8878 // CHECK13-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
8879 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP49]], align 8
8880 // CHECK13-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
8881 // CHECK13-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
8882 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP51]], align 8
8883 // CHECK13-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
8884 // CHECK13-NEXT:    store i64 [[TMP35]], i64* [[TMP52]], align 8
8885 // CHECK13-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
8886 // CHECK13-NEXT:    store i8* null, i8** [[TMP53]], align 8
8887 // CHECK13-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
8888 // CHECK13-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
8889 // CHECK13-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
8890 // CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[N]], align 4
8891 // CHECK13-NEXT:    store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
8892 // CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
8893 // CHECK13-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
8894 // CHECK13-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
8895 // CHECK13-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
8896 // CHECK13-NEXT:    store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
8897 // CHECK13-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
8898 // CHECK13-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
8899 // CHECK13-NEXT:    [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
8900 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
8901 // CHECK13-NEXT:    [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
8902 // CHECK13-NEXT:    [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
8903 // CHECK13-NEXT:    br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
8904 // CHECK13:       omp_offload.failed16:
8905 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
8906 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
8907 // CHECK13:       omp_offload.cont17:
8908 // CHECK13-NEXT:    [[TMP63:%.*]] = load i32, i32* [[M]], align 4
8909 // CHECK13-NEXT:    store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
8910 // CHECK13-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N]], align 4
8911 // CHECK13-NEXT:    [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
8912 // CHECK13-NEXT:    store i32 [[TMP64]], i32* [[CONV20]], align 4
8913 // CHECK13-NEXT:    [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
8914 // CHECK13-NEXT:    [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
8915 // CHECK13-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
8916 // CHECK13-NEXT:    store i32 [[TMP66]], i32* [[CONV21]], align 4
8917 // CHECK13-NEXT:    [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
8918 // CHECK13-NEXT:    [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
8919 // CHECK13-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
8920 // CHECK13-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
8921 // CHECK13-NEXT:    store i64 [[TMP65]], i64* [[TMP70]], align 8
8922 // CHECK13-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
8923 // CHECK13-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
8924 // CHECK13-NEXT:    store i64 [[TMP65]], i64* [[TMP72]], align 8
8925 // CHECK13-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
8926 // CHECK13-NEXT:    store i64 4, i64* [[TMP73]], align 8
8927 // CHECK13-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
8928 // CHECK13-NEXT:    store i8* null, i8** [[TMP74]], align 8
8929 // CHECK13-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
8930 // CHECK13-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
8931 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP76]], align 8
8932 // CHECK13-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
8933 // CHECK13-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
8934 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP78]], align 8
8935 // CHECK13-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
8936 // CHECK13-NEXT:    store i64 8, i64* [[TMP79]], align 8
8937 // CHECK13-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
8938 // CHECK13-NEXT:    store i8* null, i8** [[TMP80]], align 8
8939 // CHECK13-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
8940 // CHECK13-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
8941 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP82]], align 8
8942 // CHECK13-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
8943 // CHECK13-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
8944 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 8
8945 // CHECK13-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
8946 // CHECK13-NEXT:    store i64 [[TMP68]], i64* [[TMP85]], align 8
8947 // CHECK13-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
8948 // CHECK13-NEXT:    store i8* null, i8** [[TMP86]], align 8
8949 // CHECK13-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
8950 // CHECK13-NEXT:    [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
8951 // CHECK13-NEXT:    store i64 [[TMP67]], i64* [[TMP88]], align 8
8952 // CHECK13-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
8953 // CHECK13-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
8954 // CHECK13-NEXT:    store i64 [[TMP67]], i64* [[TMP90]], align 8
8955 // CHECK13-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
8956 // CHECK13-NEXT:    store i64 4, i64* [[TMP91]], align 8
8957 // CHECK13-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
8958 // CHECK13-NEXT:    store i8* null, i8** [[TMP92]], align 8
8959 // CHECK13-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
8960 // CHECK13-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
8961 // CHECK13-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
8962 // CHECK13-NEXT:    [[TMP96:%.*]] = load i32, i32* [[N]], align 4
8963 // CHECK13-NEXT:    store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
8964 // CHECK13-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
8965 // CHECK13-NEXT:    [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
8966 // CHECK13-NEXT:    [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
8967 // CHECK13-NEXT:    [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
8968 // CHECK13-NEXT:    store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
8969 // CHECK13-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
8970 // CHECK13-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
8971 // CHECK13-NEXT:    [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
8972 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
8973 // CHECK13-NEXT:    [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
8974 // CHECK13-NEXT:    [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
8975 // CHECK13-NEXT:    br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
8976 // CHECK13:       omp_offload.failed33:
8977 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
8978 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT34]]
8979 // CHECK13:       omp_offload.cont34:
8980 // CHECK13-NEXT:    [[TMP102:%.*]] = load i32, i32* [[N]], align 4
8981 // CHECK13-NEXT:    [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
8982 // CHECK13-NEXT:    store i32 [[TMP102]], i32* [[CONV36]], align 4
8983 // CHECK13-NEXT:    [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
8984 // CHECK13-NEXT:    [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
8985 // CHECK13-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
8986 // CHECK13-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
8987 // CHECK13-NEXT:    store i64 [[TMP103]], i64* [[TMP106]], align 8
8988 // CHECK13-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
8989 // CHECK13-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
8990 // CHECK13-NEXT:    store i64 [[TMP103]], i64* [[TMP108]], align 8
8991 // CHECK13-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
8992 // CHECK13-NEXT:    store i64 4, i64* [[TMP109]], align 8
8993 // CHECK13-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
8994 // CHECK13-NEXT:    store i8* null, i8** [[TMP110]], align 8
8995 // CHECK13-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
8996 // CHECK13-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
8997 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP112]], align 8
8998 // CHECK13-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
8999 // CHECK13-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
9000 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP114]], align 8
9001 // CHECK13-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
9002 // CHECK13-NEXT:    store i64 8, i64* [[TMP115]], align 8
9003 // CHECK13-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
9004 // CHECK13-NEXT:    store i8* null, i8** [[TMP116]], align 8
9005 // CHECK13-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
9006 // CHECK13-NEXT:    [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
9007 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP118]], align 8
9008 // CHECK13-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
9009 // CHECK13-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
9010 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP120]], align 8
9011 // CHECK13-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
9012 // CHECK13-NEXT:    store i64 [[TMP104]], i64* [[TMP121]], align 8
9013 // CHECK13-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
9014 // CHECK13-NEXT:    store i8* null, i8** [[TMP122]], align 8
9015 // CHECK13-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
9016 // CHECK13-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
9017 // CHECK13-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
9018 // CHECK13-NEXT:    [[TMP126:%.*]] = load i32, i32* [[N]], align 4
9019 // CHECK13-NEXT:    store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
9020 // CHECK13-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
9021 // CHECK13-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
9022 // CHECK13-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
9023 // CHECK13-NEXT:    [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
9024 // CHECK13-NEXT:    store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
9025 // CHECK13-NEXT:    [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
9026 // CHECK13-NEXT:    [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
9027 // CHECK13-NEXT:    [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
9028 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
9029 // CHECK13-NEXT:    [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
9030 // CHECK13-NEXT:    [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
9031 // CHECK13-NEXT:    br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
9032 // CHECK13:       omp_offload.failed48:
9033 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
9034 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT49]]
9035 // CHECK13:       omp_offload.cont49:
9036 // CHECK13-NEXT:    [[TMP132:%.*]] = load i32, i32* [[M]], align 4
9037 // CHECK13-NEXT:    store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
9038 // CHECK13-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
9039 // CHECK13-NEXT:    [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
9040 // CHECK13-NEXT:    store i32 [[TMP133]], i32* [[CONV52]], align 4
9041 // CHECK13-NEXT:    [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
9042 // CHECK13-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
9043 // CHECK13-NEXT:    [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
9044 // CHECK13-NEXT:    store i32 [[TMP135]], i32* [[CONV54]], align 4
9045 // CHECK13-NEXT:    [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
9046 // CHECK13-NEXT:    [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
9047 // CHECK13-NEXT:    [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
9048 // CHECK13-NEXT:    [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
9049 // CHECK13-NEXT:    store i64 [[TMP134]], i64* [[TMP139]], align 8
9050 // CHECK13-NEXT:    [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
9051 // CHECK13-NEXT:    [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
9052 // CHECK13-NEXT:    store i64 [[TMP134]], i64* [[TMP141]], align 8
9053 // CHECK13-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
9054 // CHECK13-NEXT:    store i64 4, i64* [[TMP142]], align 8
9055 // CHECK13-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
9056 // CHECK13-NEXT:    store i8* null, i8** [[TMP143]], align 8
9057 // CHECK13-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
9058 // CHECK13-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
9059 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP145]], align 8
9060 // CHECK13-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
9061 // CHECK13-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
9062 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[TMP147]], align 8
9063 // CHECK13-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
9064 // CHECK13-NEXT:    store i64 8, i64* [[TMP148]], align 8
9065 // CHECK13-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
9066 // CHECK13-NEXT:    store i8* null, i8** [[TMP149]], align 8
9067 // CHECK13-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
9068 // CHECK13-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
9069 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP151]], align 8
9070 // CHECK13-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
9071 // CHECK13-NEXT:    [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
9072 // CHECK13-NEXT:    store i32* [[VLA]], i32** [[TMP153]], align 8
9073 // CHECK13-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
9074 // CHECK13-NEXT:    store i64 [[TMP137]], i64* [[TMP154]], align 8
9075 // CHECK13-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
9076 // CHECK13-NEXT:    store i8* null, i8** [[TMP155]], align 8
9077 // CHECK13-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
9078 // CHECK13-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
9079 // CHECK13-NEXT:    store i64 [[TMP136]], i64* [[TMP157]], align 8
9080 // CHECK13-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
9081 // CHECK13-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
9082 // CHECK13-NEXT:    store i64 [[TMP136]], i64* [[TMP159]], align 8
9083 // CHECK13-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
9084 // CHECK13-NEXT:    store i64 4, i64* [[TMP160]], align 8
9085 // CHECK13-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
9086 // CHECK13-NEXT:    store i8* null, i8** [[TMP161]], align 8
9087 // CHECK13-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
9088 // CHECK13-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
9089 // CHECK13-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
9090 // CHECK13-NEXT:    [[TMP165:%.*]] = load i32, i32* [[N]], align 4
9091 // CHECK13-NEXT:    store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
9092 // CHECK13-NEXT:    [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
9093 // CHECK13-NEXT:    [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
9094 // CHECK13-NEXT:    [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
9095 // CHECK13-NEXT:    [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
9096 // CHECK13-NEXT:    store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
9097 // CHECK13-NEXT:    [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
9098 // CHECK13-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
9099 // CHECK13-NEXT:    [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
9100 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
9101 // CHECK13-NEXT:    [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
9102 // CHECK13-NEXT:    [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
9103 // CHECK13-NEXT:    br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
9104 // CHECK13:       omp_offload.failed66:
9105 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
9106 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT67]]
9107 // CHECK13:       omp_offload.cont67:
9108 // CHECK13-NEXT:    [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
9109 // CHECK13-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
9110 // CHECK13-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
9111 // CHECK13-NEXT:    [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
9112 // CHECK13-NEXT:    call void @llvm.stackrestore(i8* [[TMP172]])
9113 // CHECK13-NEXT:    [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
9114 // CHECK13-NEXT:    ret i32 [[TMP173]]
9115 //
9116 //
9117 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
9118 // CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
9119 // CHECK13-NEXT:  entry:
9120 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9121 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9122 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9123 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9124 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9125 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9126 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9127 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9128 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9129 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9130 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9131 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9132 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
9133 // CHECK13-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
9134 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
9135 // CHECK13-NEXT:    ret void
9136 //
9137 //
9138 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined.
9139 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9140 // CHECK13-NEXT:  entry:
9141 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9142 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9143 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9144 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9145 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9146 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9147 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9148 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9149 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9150 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
9151 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9152 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9153 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9154 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9155 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
9156 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9157 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9158 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9159 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9160 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9161 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9162 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9163 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9164 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9165 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9166 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
9167 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9168 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
9169 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9170 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9171 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9172 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
9173 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9174 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
9175 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9176 // CHECK13:       omp.precond.then:
9177 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9178 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9179 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
9180 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9181 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9182 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9183 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
9184 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9185 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9186 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9187 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
9188 // CHECK13-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9189 // CHECK13:       cond.true:
9190 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9191 // CHECK13-NEXT:    br label [[COND_END:%.*]]
9192 // CHECK13:       cond.false:
9193 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9194 // CHECK13-NEXT:    br label [[COND_END]]
9195 // CHECK13:       cond.end:
9196 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
9197 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9198 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9199 // CHECK13-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
9200 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9201 // CHECK13:       omp.inner.for.cond:
9202 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
9203 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
9204 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
9205 // CHECK13-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9206 // CHECK13:       omp.inner.for.body:
9207 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !13
9208 // CHECK13-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
9209 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
9210 // CHECK13-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
9211 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !13
9212 // CHECK13-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9213 // CHECK13-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !13
9214 // CHECK13-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !13
9215 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !13
9216 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9217 // CHECK13:       omp.inner.for.inc:
9218 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
9219 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !13
9220 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9221 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
9222 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
9223 // CHECK13:       omp.inner.for.end:
9224 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9225 // CHECK13:       omp.loop.exit:
9226 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9227 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
9228 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
9229 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9230 // CHECK13-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9231 // CHECK13-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9232 // CHECK13:       .omp.final.then:
9233 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9234 // CHECK13-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
9235 // CHECK13-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9236 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
9237 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
9238 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
9239 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9240 // CHECK13:       .omp.final.done:
9241 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
9242 // CHECK13:       omp.precond.end:
9243 // CHECK13-NEXT:    ret void
9244 //
9245 //
9246 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..1
9247 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9248 // CHECK13-NEXT:  entry:
9249 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9250 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9251 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9252 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9253 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9254 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9255 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9256 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9257 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9258 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9259 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9260 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
9261 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9262 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9263 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9264 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9265 // CHECK13-NEXT:    [[I5:%.*]] = alloca i32, align 4
9266 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9267 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9268 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9269 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9270 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9271 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9272 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9273 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9274 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9275 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9276 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9277 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
9278 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9279 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
9280 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9281 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9282 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9283 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
9284 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9285 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
9286 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9287 // CHECK13:       omp.precond.then:
9288 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9289 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9290 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
9291 // CHECK13-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9292 // CHECK13-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
9293 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9294 // CHECK13-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
9295 // CHECK13-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
9296 // CHECK13-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
9297 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9298 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9299 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9300 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9301 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9302 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9303 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9304 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
9305 // CHECK13-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9306 // CHECK13:       cond.true:
9307 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9308 // CHECK13-NEXT:    br label [[COND_END:%.*]]
9309 // CHECK13:       cond.false:
9310 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9311 // CHECK13-NEXT:    br label [[COND_END]]
9312 // CHECK13:       cond.end:
9313 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
9314 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9315 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9316 // CHECK13-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
9317 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9318 // CHECK13:       omp.inner.for.cond:
9319 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
9320 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !17
9321 // CHECK13-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
9322 // CHECK13-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9323 // CHECK13:       omp.inner.for.body:
9324 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
9325 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
9326 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9327 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !17
9328 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !17
9329 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
9330 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
9331 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !17
9332 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9333 // CHECK13:       omp.body.continue:
9334 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9335 // CHECK13:       omp.inner.for.inc:
9336 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
9337 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
9338 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
9339 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
9340 // CHECK13:       omp.inner.for.end:
9341 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9342 // CHECK13:       omp.loop.exit:
9343 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9344 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
9345 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
9346 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9347 // CHECK13-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
9348 // CHECK13-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9349 // CHECK13:       .omp.final.then:
9350 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9351 // CHECK13-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
9352 // CHECK13-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
9353 // CHECK13-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
9354 // CHECK13-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
9355 // CHECK13-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
9356 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9357 // CHECK13:       .omp.final.done:
9358 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
9359 // CHECK13:       omp.precond.end:
9360 // CHECK13-NEXT:    ret void
9361 //
9362 //
9363 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
9364 // CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9365 // CHECK13-NEXT:  entry:
9366 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9367 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9368 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9369 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9370 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9371 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9372 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9373 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9374 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9375 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9376 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9377 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9378 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
9379 // CHECK13-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
9380 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
9381 // CHECK13-NEXT:    ret void
9382 //
9383 //
9384 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..2
9385 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9386 // CHECK13-NEXT:  entry:
9387 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9388 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9389 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9390 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9391 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9392 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9393 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9394 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9395 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9396 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
9397 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9398 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9399 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9400 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9401 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
9402 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9403 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9404 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9405 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9406 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9407 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9408 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9409 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9410 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9411 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9412 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
9413 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9414 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
9415 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9416 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9417 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9418 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
9419 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9420 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
9421 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9422 // CHECK13:       omp.precond.then:
9423 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9424 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9425 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
9426 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9427 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9428 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9429 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
9430 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9431 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9432 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9433 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
9434 // CHECK13-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9435 // CHECK13:       cond.true:
9436 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9437 // CHECK13-NEXT:    br label [[COND_END:%.*]]
9438 // CHECK13:       cond.false:
9439 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9440 // CHECK13-NEXT:    br label [[COND_END]]
9441 // CHECK13:       cond.end:
9442 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
9443 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9444 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9445 // CHECK13-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
9446 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9447 // CHECK13:       omp.inner.for.cond:
9448 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
9449 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
9450 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
9451 // CHECK13-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9452 // CHECK13:       omp.inner.for.body:
9453 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !22
9454 // CHECK13-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
9455 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
9456 // CHECK13-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
9457 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !22
9458 // CHECK13-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9459 // CHECK13-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !22
9460 // CHECK13-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !22
9461 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !22
9462 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9463 // CHECK13:       omp.inner.for.inc:
9464 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
9465 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !22
9466 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9467 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
9468 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
9469 // CHECK13:       omp.inner.for.end:
9470 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9471 // CHECK13:       omp.loop.exit:
9472 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9473 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
9474 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
9475 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9476 // CHECK13-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9477 // CHECK13-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9478 // CHECK13:       .omp.final.then:
9479 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9480 // CHECK13-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
9481 // CHECK13-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9482 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
9483 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
9484 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
9485 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9486 // CHECK13:       .omp.final.done:
9487 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
9488 // CHECK13:       omp.precond.end:
9489 // CHECK13-NEXT:    ret void
9490 //
9491 //
9492 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..3
9493 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9494 // CHECK13-NEXT:  entry:
9495 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9496 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9497 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9498 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9499 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9500 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9501 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9502 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9503 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9504 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9505 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9506 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
9507 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9508 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9509 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9510 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9511 // CHECK13-NEXT:    [[I5:%.*]] = alloca i32, align 4
9512 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9513 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9514 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9515 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9516 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9517 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9518 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9519 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9520 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9521 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9522 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9523 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
9524 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9525 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
9526 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9527 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9528 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9529 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
9530 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9531 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
9532 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9533 // CHECK13:       omp.precond.then:
9534 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9535 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9536 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
9537 // CHECK13-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9538 // CHECK13-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
9539 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9540 // CHECK13-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
9541 // CHECK13-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
9542 // CHECK13-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
9543 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9544 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9545 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9546 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9547 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9548 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9549 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9550 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
9551 // CHECK13-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9552 // CHECK13:       cond.true:
9553 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9554 // CHECK13-NEXT:    br label [[COND_END:%.*]]
9555 // CHECK13:       cond.false:
9556 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9557 // CHECK13-NEXT:    br label [[COND_END]]
9558 // CHECK13:       cond.end:
9559 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
9560 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9561 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9562 // CHECK13-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
9563 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9564 // CHECK13:       omp.inner.for.cond:
9565 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
9566 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
9567 // CHECK13-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
9568 // CHECK13-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9569 // CHECK13:       omp.inner.for.body:
9570 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
9571 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
9572 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9573 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !25
9574 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !25
9575 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
9576 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
9577 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
9578 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9579 // CHECK13:       omp.body.continue:
9580 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9581 // CHECK13:       omp.inner.for.inc:
9582 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
9583 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
9584 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
9585 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
9586 // CHECK13:       omp.inner.for.end:
9587 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9588 // CHECK13:       omp.loop.exit:
9589 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9590 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
9591 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
9592 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9593 // CHECK13-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
9594 // CHECK13-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9595 // CHECK13:       .omp.final.then:
9596 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9597 // CHECK13-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
9598 // CHECK13-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
9599 // CHECK13-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
9600 // CHECK13-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
9601 // CHECK13-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
9602 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9603 // CHECK13:       .omp.final.done:
9604 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
9605 // CHECK13:       omp.precond.end:
9606 // CHECK13-NEXT:    ret void
9607 //
9608 //
9609 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
9610 // CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
9611 // CHECK13-NEXT:  entry:
9612 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9613 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9614 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9615 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9616 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9617 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9618 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9619 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9620 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9621 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
9622 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9623 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9624 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9625 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
9626 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9627 // CHECK13-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9628 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
9629 // CHECK13-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
9630 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
9631 // CHECK13-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
9632 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
9633 // CHECK13-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
9634 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
9635 // CHECK13-NEXT:    ret void
9636 //
9637 //
9638 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..5
9639 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
9640 // CHECK13-NEXT:  entry:
9641 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9642 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9643 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9644 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9645 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9646 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9647 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9648 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9649 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9650 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
9651 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
9652 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9653 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9654 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9655 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9656 // CHECK13-NEXT:    [[I5:%.*]] = alloca i32, align 4
9657 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9658 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9659 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9660 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9661 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9662 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9663 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9664 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
9665 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9666 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9667 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9668 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
9669 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9670 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9671 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9672 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
9673 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9674 // CHECK13-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
9675 // CHECK13-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
9676 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
9677 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9678 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
9679 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9680 // CHECK13:       omp.precond.then:
9681 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9682 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9683 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
9684 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9685 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9686 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
9687 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9688 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
9689 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
9690 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9691 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9692 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
9693 // CHECK13-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9694 // CHECK13:       cond.true:
9695 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9696 // CHECK13-NEXT:    br label [[COND_END:%.*]]
9697 // CHECK13:       cond.false:
9698 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9699 // CHECK13-NEXT:    br label [[COND_END]]
9700 // CHECK13:       cond.end:
9701 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
9702 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9703 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9704 // CHECK13-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
9705 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9706 // CHECK13:       omp.inner.for.cond:
9707 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
9708 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
9709 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
9710 // CHECK13-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
9711 // CHECK13-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9712 // CHECK13:       omp.inner.for.body:
9713 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
9714 // CHECK13-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
9715 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
9716 // CHECK13-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
9717 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !28
9718 // CHECK13-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9719 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[CONV8]], align 4, !llvm.access.group !28
9720 // CHECK13-NEXT:    [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !28
9721 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !28
9722 // CHECK13-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
9723 // CHECK13-NEXT:    store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28
9724 // CHECK13-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28
9725 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28
9726 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9727 // CHECK13:       omp.inner.for.inc:
9728 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
9729 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
9730 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
9731 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
9732 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
9733 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
9734 // CHECK13-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
9735 // CHECK13-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
9736 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
9737 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
9738 // CHECK13-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
9739 // CHECK13-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
9740 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
9741 // CHECK13-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
9742 // CHECK13-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
9743 // CHECK13-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
9744 // CHECK13:       cond.true14:
9745 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
9746 // CHECK13-NEXT:    br label [[COND_END16:%.*]]
9747 // CHECK13:       cond.false15:
9748 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
9749 // CHECK13-NEXT:    br label [[COND_END16]]
9750 // CHECK13:       cond.end16:
9751 // CHECK13-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
9752 // CHECK13-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
9753 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
9754 // CHECK13-NEXT:    store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
9755 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
9756 // CHECK13:       omp.inner.for.end:
9757 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9758 // CHECK13:       omp.loop.exit:
9759 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9760 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
9761 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
9762 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9763 // CHECK13-NEXT:    [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0
9764 // CHECK13-NEXT:    br i1 [[TMP38]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9765 // CHECK13:       .omp.final.then:
9766 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9767 // CHECK13-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP39]], 0
9768 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
9769 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV19]], 1
9770 // CHECK13-NEXT:    [[ADD20:%.*]] = add nsw i32 0, [[MUL]]
9771 // CHECK13-NEXT:    store i32 [[ADD20]], i32* [[I5]], align 4
9772 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9773 // CHECK13:       .omp.final.done:
9774 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
9775 // CHECK13:       omp.precond.end:
9776 // CHECK13-NEXT:    ret void
9777 //
9778 //
9779 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6
9780 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
9781 // CHECK13-NEXT:  entry:
9782 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9783 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9784 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9785 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9786 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9787 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9788 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9789 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9790 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9791 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9792 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9793 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
9794 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
9795 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9796 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9797 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9798 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9799 // CHECK13-NEXT:    [[I7:%.*]] = alloca i32, align 4
9800 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9801 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9802 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9803 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9804 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9805 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9806 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9807 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
9808 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9809 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9810 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9811 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
9812 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9813 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9814 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9815 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
9816 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9817 // CHECK13-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
9818 // CHECK13-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
9819 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
9820 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9821 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
9822 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9823 // CHECK13:       omp.precond.then:
9824 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9825 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9826 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
9827 // CHECK13-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9828 // CHECK13-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
9829 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9830 // CHECK13-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
9831 // CHECK13-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
9832 // CHECK13-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
9833 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9834 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9835 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9836 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9837 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9838 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9839 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9840 // CHECK13-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
9841 // CHECK13-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9842 // CHECK13:       cond.true:
9843 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9844 // CHECK13-NEXT:    br label [[COND_END:%.*]]
9845 // CHECK13:       cond.false:
9846 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9847 // CHECK13-NEXT:    br label [[COND_END]]
9848 // CHECK13:       cond.end:
9849 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
9850 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9851 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9852 // CHECK13-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
9853 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9854 // CHECK13:       omp.inner.for.cond:
9855 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
9856 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
9857 // CHECK13-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
9858 // CHECK13-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9859 // CHECK13:       omp.inner.for.body:
9860 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
9861 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
9862 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9863 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !31
9864 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !31
9865 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
9866 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
9867 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !31
9868 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9869 // CHECK13:       omp.body.continue:
9870 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9871 // CHECK13:       omp.inner.for.inc:
9872 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
9873 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
9874 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
9875 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
9876 // CHECK13:       omp.inner.for.end:
9877 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9878 // CHECK13:       omp.loop.exit:
9879 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9880 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
9881 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
9882 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9883 // CHECK13-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
9884 // CHECK13-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9885 // CHECK13:       .omp.final.then:
9886 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9887 // CHECK13-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP24]], 0
9888 // CHECK13-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
9889 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
9890 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
9891 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[I7]], align 4
9892 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9893 // CHECK13:       .omp.final.done:
9894 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
9895 // CHECK13:       omp.precond.end:
9896 // CHECK13-NEXT:    ret void
9897 //
9898 //
9899 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
9900 // CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9901 // CHECK13-NEXT:  entry:
9902 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9903 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9904 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9905 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9906 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9907 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9908 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9909 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9910 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9911 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9912 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9913 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9914 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
9915 // CHECK13-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
9916 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
9917 // CHECK13-NEXT:    ret void
9918 //
9919 //
9920 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..8
9921 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9922 // CHECK13-NEXT:  entry:
9923 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9924 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9925 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9926 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9927 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9928 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9929 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9930 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9931 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9932 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
9933 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9934 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9935 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9936 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9937 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
9938 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9939 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9940 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9941 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9942 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9943 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9944 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9945 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9946 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
9947 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9948 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
9949 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9950 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
9951 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9952 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9953 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9954 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
9955 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9956 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
9957 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9958 // CHECK13:       omp.precond.then:
9959 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9960 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9961 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
9962 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9963 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9964 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9965 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
9966 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9967 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9968 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9969 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
9970 // CHECK13-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9971 // CHECK13:       cond.true:
9972 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9973 // CHECK13-NEXT:    br label [[COND_END:%.*]]
9974 // CHECK13:       cond.false:
9975 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9976 // CHECK13-NEXT:    br label [[COND_END]]
9977 // CHECK13:       cond.end:
9978 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
9979 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9980 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9981 // CHECK13-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
9982 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9983 // CHECK13:       omp.inner.for.cond:
9984 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
9985 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
9986 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
9987 // CHECK13-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9988 // CHECK13:       omp.inner.for.body:
9989 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !34
9990 // CHECK13-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
9991 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
9992 // CHECK13-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
9993 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !34
9994 // CHECK13-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9995 // CHECK13-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34
9996 // CHECK13-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34
9997 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34
9998 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9999 // CHECK13:       omp.inner.for.inc:
10000 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
10001 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !34
10002 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
10003 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
10004 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
10005 // CHECK13:       omp.inner.for.end:
10006 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10007 // CHECK13:       omp.loop.exit:
10008 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10009 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
10010 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
10011 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10012 // CHECK13-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
10013 // CHECK13-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10014 // CHECK13:       .omp.final.then:
10015 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10016 // CHECK13-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
10017 // CHECK13-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
10018 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
10019 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
10020 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
10021 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10022 // CHECK13:       .omp.final.done:
10023 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
10024 // CHECK13:       omp.precond.end:
10025 // CHECK13-NEXT:    ret void
10026 //
10027 //
10028 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..9
10029 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
10030 // CHECK13-NEXT:  entry:
10031 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10032 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10033 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10034 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10035 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10036 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
10037 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
10038 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10039 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10040 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10041 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10042 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10043 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10044 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10045 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10046 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10047 // CHECK13-NEXT:    [[I5:%.*]] = alloca i32, align 4
10048 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10049 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10050 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10051 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10052 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10053 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
10054 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
10055 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10056 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
10057 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
10058 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10059 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
10060 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10061 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
10062 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10063 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10064 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10065 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
10066 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10067 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
10068 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10069 // CHECK13:       omp.precond.then:
10070 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10071 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10072 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
10073 // CHECK13-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10074 // CHECK13-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
10075 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10076 // CHECK13-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
10077 // CHECK13-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
10078 // CHECK13-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
10079 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10080 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10081 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10082 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10083 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10084 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10085 // CHECK13-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
10086 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10087 // CHECK13:       omp.dispatch.cond:
10088 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10089 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
10090 // CHECK13-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
10091 // CHECK13-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
10092 // CHECK13-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10093 // CHECK13:       omp.dispatch.body:
10094 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10095 // CHECK13-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
10096 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10097 // CHECK13:       omp.inner.for.cond:
10098 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
10099 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !37
10100 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
10101 // CHECK13-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10102 // CHECK13:       omp.inner.for.body:
10103 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
10104 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
10105 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10106 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !37
10107 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !37
10108 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
10109 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
10110 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !37
10111 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10112 // CHECK13:       omp.body.continue:
10113 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10114 // CHECK13:       omp.inner.for.inc:
10115 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
10116 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
10117 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
10118 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
10119 // CHECK13:       omp.inner.for.end:
10120 // CHECK13-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10121 // CHECK13:       omp.dispatch.inc:
10122 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND]]
10123 // CHECK13:       omp.dispatch.end:
10124 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10125 // CHECK13-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
10126 // CHECK13-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10127 // CHECK13:       .omp.final.then:
10128 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10129 // CHECK13-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP23]], 0
10130 // CHECK13-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
10131 // CHECK13-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
10132 // CHECK13-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
10133 // CHECK13-NEXT:    store i32 [[ADD11]], i32* [[I5]], align 4
10134 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10135 // CHECK13:       .omp.final.done:
10136 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
10137 // CHECK13:       omp.precond.end:
10138 // CHECK13-NEXT:    ret void
10139 //
10140 //
10141 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
10142 // CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10143 // CHECK13-NEXT:  entry:
10144 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10145 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
10146 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
10147 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10148 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10149 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10150 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10151 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
10152 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
10153 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10154 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10155 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
10156 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
10157 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10158 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10159 // CHECK13-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10160 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
10161 // CHECK13-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
10162 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
10163 // CHECK13-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10164 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
10165 // CHECK13-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
10166 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
10167 // CHECK13-NEXT:    ret void
10168 //
10169 //
10170 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11
10171 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10172 // CHECK13-NEXT:  entry:
10173 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10174 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10175 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10176 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
10177 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
10178 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10179 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10180 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10181 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10182 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
10183 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10184 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10185 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10186 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10187 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10188 // CHECK13-NEXT:    [[I5:%.*]] = alloca i32, align 4
10189 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10190 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10191 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10192 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10193 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10194 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
10195 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
10196 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10197 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10198 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
10199 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
10200 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10201 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10202 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10203 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10204 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
10205 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10206 // CHECK13-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
10207 // CHECK13-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
10208 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
10209 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10210 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
10211 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10212 // CHECK13:       omp.precond.then:
10213 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10214 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10215 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
10216 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10217 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10218 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10219 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
10220 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10221 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10222 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10223 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
10224 // CHECK13-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10225 // CHECK13:       cond.true:
10226 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10227 // CHECK13-NEXT:    br label [[COND_END:%.*]]
10228 // CHECK13:       cond.false:
10229 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10230 // CHECK13-NEXT:    br label [[COND_END]]
10231 // CHECK13:       cond.end:
10232 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
10233 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10234 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10235 // CHECK13-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
10236 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10237 // CHECK13:       omp.inner.for.cond:
10238 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
10239 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
10240 // CHECK13-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
10241 // CHECK13-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10242 // CHECK13:       omp.inner.for.body:
10243 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !40
10244 // CHECK13-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
10245 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
10246 // CHECK13-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
10247 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !40
10248 // CHECK13-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10249 // CHECK13-NEXT:    store i32 [[TMP19]], i32* [[CONV8]], align 4, !llvm.access.group !40
10250 // CHECK13-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !40
10251 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !40
10252 // CHECK13-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10253 // CHECK13-NEXT:    store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40
10254 // CHECK13-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40
10255 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40
10256 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10257 // CHECK13:       omp.inner.for.inc:
10258 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
10259 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !40
10260 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
10261 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
10262 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
10263 // CHECK13:       omp.inner.for.end:
10264 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10265 // CHECK13:       omp.loop.exit:
10266 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10267 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
10268 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
10269 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10270 // CHECK13-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
10271 // CHECK13-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10272 // CHECK13:       .omp.final.then:
10273 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10274 // CHECK13-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP29]], 0
10275 // CHECK13-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10276 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV11]], 1
10277 // CHECK13-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL]]
10278 // CHECK13-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
10279 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10280 // CHECK13:       .omp.final.done:
10281 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
10282 // CHECK13:       omp.precond.end:
10283 // CHECK13-NEXT:    ret void
10284 //
10285 //
10286 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..12
10287 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10288 // CHECK13-NEXT:  entry:
10289 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10290 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10291 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10292 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10293 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10294 // CHECK13-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
10295 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
10296 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10297 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10298 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10299 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10300 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
10301 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10302 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10303 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10304 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10305 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10306 // CHECK13-NEXT:    [[I7:%.*]] = alloca i32, align 4
10307 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10308 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10309 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10310 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10311 // CHECK13-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10312 // CHECK13-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
10313 // CHECK13-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
10314 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10315 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10316 // CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
10317 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
10318 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10319 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10320 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10321 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10322 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
10323 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10324 // CHECK13-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
10325 // CHECK13-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
10326 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
10327 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10328 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
10329 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10330 // CHECK13:       omp.precond.then:
10331 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10332 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10333 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
10334 // CHECK13-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10335 // CHECK13-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
10336 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10337 // CHECK13-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
10338 // CHECK13-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
10339 // CHECK13-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
10340 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10341 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10342 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
10343 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10344 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10345 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10346 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
10347 // CHECK13-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
10348 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10349 // CHECK13:       omp.dispatch.cond:
10350 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10351 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
10352 // CHECK13-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
10353 // CHECK13-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
10354 // CHECK13-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10355 // CHECK13:       omp.dispatch.body:
10356 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10357 // CHECK13-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10358 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10359 // CHECK13:       omp.inner.for.cond:
10360 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
10361 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43
10362 // CHECK13-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10363 // CHECK13-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10364 // CHECK13:       omp.inner.for.body:
10365 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
10366 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10367 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10368 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !43
10369 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !43
10370 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
10371 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
10372 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !43
10373 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10374 // CHECK13:       omp.body.continue:
10375 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10376 // CHECK13:       omp.inner.for.inc:
10377 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
10378 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
10379 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
10380 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
10381 // CHECK13:       omp.inner.for.end:
10382 // CHECK13-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10383 // CHECK13:       omp.dispatch.inc:
10384 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND]]
10385 // CHECK13:       omp.dispatch.end:
10386 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10387 // CHECK13-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
10388 // CHECK13-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10389 // CHECK13:       .omp.final.then:
10390 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10391 // CHECK13-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP24]], 0
10392 // CHECK13-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10393 // CHECK13-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
10394 // CHECK13-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
10395 // CHECK13-NEXT:    store i32 [[ADD13]], i32* [[I7]], align 4
10396 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10397 // CHECK13:       .omp.final.done:
10398 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
10399 // CHECK13:       omp.precond.end:
10400 // CHECK13-NEXT:    ret void
10401 //
10402 //
10403 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
10404 // CHECK13-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
10405 // CHECK13-NEXT:  entry:
10406 // CHECK13-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
10407 // CHECK13-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
10408 // CHECK13-NEXT:    [[M:%.*]] = alloca i32, align 4
10409 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
10410 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
10411 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
10412 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10413 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
10414 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
10415 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
10416 // CHECK13-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
10417 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10418 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10419 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
10420 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
10421 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
10422 // CHECK13-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
10423 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
10424 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
10425 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
10426 // CHECK13-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
10427 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
10428 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
10429 // CHECK13-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
10430 // CHECK13-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
10431 // CHECK13-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
10432 // CHECK13-NEXT:    [[_TMP25:%.*]] = alloca i32, align 4
10433 // CHECK13-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
10434 // CHECK13-NEXT:    store i32 10, i32* [[M]], align 4
10435 // CHECK13-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10436 // CHECK13-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
10437 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
10438 // CHECK13-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10439 // CHECK13-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
10440 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
10441 // CHECK13-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
10442 // CHECK13-NEXT:    store i8* null, i8** [[TMP4]], align 8
10443 // CHECK13-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10444 // CHECK13-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10445 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
10446 // CHECK13-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
10447 // CHECK13-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
10448 // CHECK13-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10449 // CHECK13:       omp_offload.failed:
10450 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
10451 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT]]
10452 // CHECK13:       omp_offload.cont:
10453 // CHECK13-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
10454 // CHECK13-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
10455 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
10456 // CHECK13-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
10457 // CHECK13-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
10458 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
10459 // CHECK13-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
10460 // CHECK13-NEXT:    store i8* null, i8** [[TMP13]], align 8
10461 // CHECK13-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
10462 // CHECK13-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
10463 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
10464 // CHECK13-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
10465 // CHECK13-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
10466 // CHECK13-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
10467 // CHECK13:       omp_offload.failed5:
10468 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
10469 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
10470 // CHECK13:       omp_offload.cont6:
10471 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
10472 // CHECK13-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
10473 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10474 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10475 // CHECK13-NEXT:    store i32 [[TMP19]], i32* [[CONV]], align 4
10476 // CHECK13-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
10477 // CHECK13-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
10478 // CHECK13-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
10479 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
10480 // CHECK13-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
10481 // CHECK13-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
10482 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
10483 // CHECK13-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
10484 // CHECK13-NEXT:    store i8* null, i8** [[TMP25]], align 8
10485 // CHECK13-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
10486 // CHECK13-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
10487 // CHECK13-NEXT:    store i64 [[TMP20]], i64* [[TMP27]], align 8
10488 // CHECK13-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
10489 // CHECK13-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
10490 // CHECK13-NEXT:    store i64 [[TMP20]], i64* [[TMP29]], align 8
10491 // CHECK13-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
10492 // CHECK13-NEXT:    store i8* null, i8** [[TMP30]], align 8
10493 // CHECK13-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
10494 // CHECK13-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
10495 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
10496 // CHECK13-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
10497 // CHECK13-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
10498 // CHECK13-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
10499 // CHECK13:       omp_offload.failed11:
10500 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
10501 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
10502 // CHECK13:       omp_offload.cont12:
10503 // CHECK13-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
10504 // CHECK13-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
10505 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
10506 // CHECK13-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
10507 // CHECK13-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
10508 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
10509 // CHECK13-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
10510 // CHECK13-NEXT:    store i8* null, i8** [[TMP39]], align 8
10511 // CHECK13-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
10512 // CHECK13-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
10513 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
10514 // CHECK13-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
10515 // CHECK13-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
10516 // CHECK13-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
10517 // CHECK13:       omp_offload.failed17:
10518 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
10519 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
10520 // CHECK13:       omp_offload.cont18:
10521 // CHECK13-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
10522 // CHECK13-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
10523 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
10524 // CHECK13-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
10525 // CHECK13-NEXT:    store i32 [[TMP45]], i32* [[CONV21]], align 4
10526 // CHECK13-NEXT:    [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
10527 // CHECK13-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
10528 // CHECK13-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
10529 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
10530 // CHECK13-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
10531 // CHECK13-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
10532 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
10533 // CHECK13-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
10534 // CHECK13-NEXT:    store i8* null, i8** [[TMP51]], align 8
10535 // CHECK13-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
10536 // CHECK13-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
10537 // CHECK13-NEXT:    store i64 [[TMP46]], i64* [[TMP53]], align 8
10538 // CHECK13-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
10539 // CHECK13-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
10540 // CHECK13-NEXT:    store i64 [[TMP46]], i64* [[TMP55]], align 8
10541 // CHECK13-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
10542 // CHECK13-NEXT:    store i8* null, i8** [[TMP56]], align 8
10543 // CHECK13-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
10544 // CHECK13-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
10545 // CHECK13-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
10546 // CHECK13-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
10547 // CHECK13-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
10548 // CHECK13-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
10549 // CHECK13:       omp_offload.failed26:
10550 // CHECK13-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
10551 // CHECK13-NEXT:    br label [[OMP_OFFLOAD_CONT27]]
10552 // CHECK13:       omp_offload.cont27:
10553 // CHECK13-NEXT:    ret i32 0
10554 //
10555 //
10556 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
10557 // CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10558 // CHECK13-NEXT:  entry:
10559 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10560 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10561 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10562 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
10563 // CHECK13-NEXT:    ret void
10564 //
10565 //
10566 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14
10567 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10568 // CHECK13-NEXT:  entry:
10569 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10570 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10571 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10572 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10573 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10574 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10575 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10576 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10577 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10578 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10579 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10580 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10581 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10582 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10583 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10584 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
10585 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10586 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10587 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10588 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
10589 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10590 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10591 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
10592 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10593 // CHECK13:       cond.true:
10594 // CHECK13-NEXT:    br label [[COND_END:%.*]]
10595 // CHECK13:       cond.false:
10596 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10597 // CHECK13-NEXT:    br label [[COND_END]]
10598 // CHECK13:       cond.end:
10599 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10600 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10601 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10602 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10603 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10604 // CHECK13:       omp.inner.for.cond:
10605 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
10606 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
10607 // CHECK13-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10608 // CHECK13-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10609 // CHECK13:       omp.inner.for.body:
10610 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !46
10611 // CHECK13-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
10612 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
10613 // CHECK13-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
10614 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46
10615 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10616 // CHECK13:       omp.inner.for.inc:
10617 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
10618 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !46
10619 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
10620 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
10621 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
10622 // CHECK13:       omp.inner.for.end:
10623 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10624 // CHECK13:       omp.loop.exit:
10625 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
10626 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10627 // CHECK13-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
10628 // CHECK13-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10629 // CHECK13:       .omp.final.then:
10630 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
10631 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10632 // CHECK13:       .omp.final.done:
10633 // CHECK13-NEXT:    ret void
10634 //
10635 //
10636 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15
10637 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10638 // CHECK13-NEXT:  entry:
10639 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10640 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10641 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10642 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10643 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10644 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10645 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10646 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10647 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10648 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10649 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10650 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10651 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10652 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10653 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10654 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10655 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10656 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10657 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10658 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
10659 // CHECK13-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10660 // CHECK13-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
10661 // CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10662 // CHECK13-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
10663 // CHECK13-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
10664 // CHECK13-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
10665 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10666 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10667 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10668 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
10669 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10670 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10671 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
10672 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10673 // CHECK13:       cond.true:
10674 // CHECK13-NEXT:    br label [[COND_END:%.*]]
10675 // CHECK13:       cond.false:
10676 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10677 // CHECK13-NEXT:    br label [[COND_END]]
10678 // CHECK13:       cond.end:
10679 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
10680 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10681 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10682 // CHECK13-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
10683 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10684 // CHECK13:       omp.inner.for.cond:
10685 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
10686 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !49
10687 // CHECK13-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
10688 // CHECK13-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10689 // CHECK13:       omp.inner.for.body:
10690 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
10691 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
10692 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10693 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !49
10694 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !49
10695 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
10696 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
10697 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !49
10698 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10699 // CHECK13:       omp.body.continue:
10700 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10701 // CHECK13:       omp.inner.for.inc:
10702 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
10703 // CHECK13-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
10704 // CHECK13-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
10705 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
10706 // CHECK13:       omp.inner.for.end:
10707 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10708 // CHECK13:       omp.loop.exit:
10709 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
10710 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10711 // CHECK13-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
10712 // CHECK13-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10713 // CHECK13:       .omp.final.then:
10714 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
10715 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10716 // CHECK13:       .omp.final.done:
10717 // CHECK13-NEXT:    ret void
10718 //
10719 //
10720 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
10721 // CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10722 // CHECK13-NEXT:  entry:
10723 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10724 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10725 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10726 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
10727 // CHECK13-NEXT:    ret void
10728 //
10729 //
10730 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..17
10731 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10732 // CHECK13-NEXT:  entry:
10733 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10734 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10735 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10736 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10737 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10738 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10739 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10740 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10741 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10742 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10743 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10744 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10745 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10746 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10747 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10748 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
10749 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10750 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10751 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10752 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
10753 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10754 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10755 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
10756 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10757 // CHECK13:       cond.true:
10758 // CHECK13-NEXT:    br label [[COND_END:%.*]]
10759 // CHECK13:       cond.false:
10760 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10761 // CHECK13-NEXT:    br label [[COND_END]]
10762 // CHECK13:       cond.end:
10763 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10764 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10765 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10766 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10767 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10768 // CHECK13:       omp.inner.for.cond:
10769 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
10770 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
10771 // CHECK13-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10772 // CHECK13-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10773 // CHECK13:       omp.inner.for.body:
10774 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !52
10775 // CHECK13-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
10776 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
10777 // CHECK13-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
10778 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52
10779 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10780 // CHECK13:       omp.inner.for.inc:
10781 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
10782 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !52
10783 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
10784 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
10785 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
10786 // CHECK13:       omp.inner.for.end:
10787 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10788 // CHECK13:       omp.loop.exit:
10789 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
10790 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10791 // CHECK13-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
10792 // CHECK13-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10793 // CHECK13:       .omp.final.then:
10794 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
10795 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10796 // CHECK13:       .omp.final.done:
10797 // CHECK13-NEXT:    ret void
10798 //
10799 //
10800 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18
10801 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10802 // CHECK13-NEXT:  entry:
10803 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10804 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10805 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10806 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10807 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10808 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10809 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10810 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10811 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10812 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10813 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10814 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10815 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10816 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10817 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10818 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10819 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10820 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10821 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10822 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
10823 // CHECK13-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10824 // CHECK13-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
10825 // CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10826 // CHECK13-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
10827 // CHECK13-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
10828 // CHECK13-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
10829 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10830 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10831 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10832 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
10833 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10834 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10835 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
10836 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10837 // CHECK13:       cond.true:
10838 // CHECK13-NEXT:    br label [[COND_END:%.*]]
10839 // CHECK13:       cond.false:
10840 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10841 // CHECK13-NEXT:    br label [[COND_END]]
10842 // CHECK13:       cond.end:
10843 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
10844 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10845 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10846 // CHECK13-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
10847 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10848 // CHECK13:       omp.inner.for.cond:
10849 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
10850 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !55
10851 // CHECK13-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
10852 // CHECK13-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10853 // CHECK13:       omp.inner.for.body:
10854 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
10855 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
10856 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10857 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !55
10858 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !55
10859 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
10860 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
10861 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !55
10862 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10863 // CHECK13:       omp.body.continue:
10864 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10865 // CHECK13:       omp.inner.for.inc:
10866 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
10867 // CHECK13-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
10868 // CHECK13-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
10869 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
10870 // CHECK13:       omp.inner.for.end:
10871 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10872 // CHECK13:       omp.loop.exit:
10873 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
10874 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10875 // CHECK13-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
10876 // CHECK13-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10877 // CHECK13:       .omp.final.then:
10878 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
10879 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10880 // CHECK13:       .omp.final.done:
10881 // CHECK13-NEXT:    ret void
10882 //
10883 //
10884 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
10885 // CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10886 // CHECK13-NEXT:  entry:
10887 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10888 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10889 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10890 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10891 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10892 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10893 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10894 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
10895 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10896 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
10897 // CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
10898 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
10899 // CHECK13-NEXT:    ret void
10900 //
10901 //
10902 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..21
10903 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10904 // CHECK13-NEXT:  entry:
10905 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10906 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10907 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10908 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10909 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10910 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10911 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10912 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10913 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10914 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10915 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10916 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10917 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10918 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10919 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
10920 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10921 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
10922 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10923 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10924 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
10925 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10926 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10927 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10928 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
10929 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10930 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10931 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
10932 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10933 // CHECK13:       cond.true:
10934 // CHECK13-NEXT:    br label [[COND_END:%.*]]
10935 // CHECK13:       cond.false:
10936 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10937 // CHECK13-NEXT:    br label [[COND_END]]
10938 // CHECK13:       cond.end:
10939 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10940 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10941 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10942 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10943 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10944 // CHECK13:       omp.inner.for.cond:
10945 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
10946 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
10947 // CHECK13-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10948 // CHECK13-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10949 // CHECK13:       omp.inner.for.body:
10950 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !58
10951 // CHECK13-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
10952 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
10953 // CHECK13-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
10954 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !58
10955 // CHECK13-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10956 // CHECK13-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58
10957 // CHECK13-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58
10958 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58
10959 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10960 // CHECK13:       omp.inner.for.inc:
10961 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
10962 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !58
10963 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
10964 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
10965 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
10966 // CHECK13:       omp.inner.for.end:
10967 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10968 // CHECK13:       omp.loop.exit:
10969 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
10970 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10971 // CHECK13-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
10972 // CHECK13-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10973 // CHECK13:       .omp.final.then:
10974 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
10975 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10976 // CHECK13:       .omp.final.done:
10977 // CHECK13-NEXT:    ret void
10978 //
10979 //
10980 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22
10981 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10982 // CHECK13-NEXT:  entry:
10983 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10984 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10985 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10986 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10987 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
10988 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10989 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10990 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10991 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10992 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10993 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10994 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10995 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
10996 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10997 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10998 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10999 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11000 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
11001 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
11002 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
11003 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
11004 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11005 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11006 // CHECK13-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11007 // CHECK13-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
11008 // CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11009 // CHECK13-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
11010 // CHECK13-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
11011 // CHECK13-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
11012 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11013 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11014 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
11015 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11016 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
11017 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
11018 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11019 // CHECK13:       omp.dispatch.cond:
11020 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11021 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11022 // CHECK13-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
11023 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
11024 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11025 // CHECK13:       cond.true:
11026 // CHECK13-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11027 // CHECK13-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
11028 // CHECK13-NEXT:    br label [[COND_END:%.*]]
11029 // CHECK13:       cond.false:
11030 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11031 // CHECK13-NEXT:    br label [[COND_END]]
11032 // CHECK13:       cond.end:
11033 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
11034 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11035 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11036 // CHECK13-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
11037 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11038 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11039 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
11040 // CHECK13-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11041 // CHECK13:       omp.dispatch.body:
11042 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11043 // CHECK13:       omp.inner.for.cond:
11044 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
11045 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !61
11046 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
11047 // CHECK13-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11048 // CHECK13:       omp.inner.for.body:
11049 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
11050 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
11051 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11052 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !61
11053 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !61
11054 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
11055 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
11056 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !61
11057 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11058 // CHECK13:       omp.body.continue:
11059 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11060 // CHECK13:       omp.inner.for.inc:
11061 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
11062 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
11063 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
11064 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
11065 // CHECK13:       omp.inner.for.end:
11066 // CHECK13-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11067 // CHECK13:       omp.dispatch.inc:
11068 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11069 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11070 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11071 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
11072 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11073 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11074 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
11075 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
11076 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND]]
11077 // CHECK13:       omp.dispatch.end:
11078 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
11079 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11080 // CHECK13-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11081 // CHECK13-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11082 // CHECK13:       .omp.final.then:
11083 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
11084 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11085 // CHECK13:       .omp.final.done:
11086 // CHECK13-NEXT:    ret void
11087 //
11088 //
11089 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
11090 // CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
11091 // CHECK13-NEXT:  entry:
11092 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
11093 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
11094 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
11095 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
11096 // CHECK13-NEXT:    ret void
11097 //
11098 //
11099 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..25
11100 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
11101 // CHECK13-NEXT:  entry:
11102 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11103 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11104 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
11105 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11106 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11107 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11108 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11109 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11110 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11111 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
11112 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11113 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11114 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
11115 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
11116 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11117 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
11118 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11119 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11120 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11121 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
11122 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11123 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11124 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
11125 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11126 // CHECK13:       cond.true:
11127 // CHECK13-NEXT:    br label [[COND_END:%.*]]
11128 // CHECK13:       cond.false:
11129 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11130 // CHECK13-NEXT:    br label [[COND_END]]
11131 // CHECK13:       cond.end:
11132 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
11133 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11134 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11135 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
11136 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11137 // CHECK13:       omp.inner.for.cond:
11138 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
11139 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
11140 // CHECK13-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
11141 // CHECK13-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11142 // CHECK13:       omp.inner.for.body:
11143 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !64
11144 // CHECK13-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
11145 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
11146 // CHECK13-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
11147 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64
11148 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11149 // CHECK13:       omp.inner.for.inc:
11150 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
11151 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !64
11152 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
11153 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
11154 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
11155 // CHECK13:       omp.inner.for.end:
11156 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11157 // CHECK13:       omp.loop.exit:
11158 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
11159 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11160 // CHECK13-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
11161 // CHECK13-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11162 // CHECK13:       .omp.final.then:
11163 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
11164 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11165 // CHECK13:       .omp.final.done:
11166 // CHECK13-NEXT:    ret void
11167 //
11168 //
11169 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26
11170 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
11171 // CHECK13-NEXT:  entry:
11172 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11173 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11174 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11175 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11176 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
11177 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11178 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11179 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11180 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11181 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11182 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11183 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
11184 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11185 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11186 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11187 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11188 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
11189 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
11190 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11191 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11192 // CHECK13-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11193 // CHECK13-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
11194 // CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11195 // CHECK13-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
11196 // CHECK13-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
11197 // CHECK13-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
11198 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11199 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11200 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11201 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11202 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11203 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
11204 // CHECK13-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
11205 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11206 // CHECK13:       omp.dispatch.cond:
11207 // CHECK13-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
11208 // CHECK13-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
11209 // CHECK13-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11210 // CHECK13:       omp.dispatch.body:
11211 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11212 // CHECK13-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
11213 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11214 // CHECK13:       omp.inner.for.cond:
11215 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
11216 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !67
11217 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
11218 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11219 // CHECK13:       omp.inner.for.body:
11220 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
11221 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
11222 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11223 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !67
11224 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !67
11225 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
11226 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
11227 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !67
11228 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11229 // CHECK13:       omp.body.continue:
11230 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11231 // CHECK13:       omp.inner.for.inc:
11232 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
11233 // CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
11234 // CHECK13-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
11235 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
11236 // CHECK13:       omp.inner.for.end:
11237 // CHECK13-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11238 // CHECK13:       omp.dispatch.inc:
11239 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND]]
11240 // CHECK13:       omp.dispatch.end:
11241 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11242 // CHECK13-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
11243 // CHECK13-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11244 // CHECK13:       .omp.final.then:
11245 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
11246 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11247 // CHECK13:       .omp.final.done:
11248 // CHECK13-NEXT:    ret void
11249 //
11250 //
11251 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
11252 // CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
11253 // CHECK13-NEXT:  entry:
11254 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
11255 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
11256 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
11257 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
11258 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
11259 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
11260 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
11261 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
11262 // CHECK13-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
11263 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
11264 // CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
11265 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
11266 // CHECK13-NEXT:    ret void
11267 //
11268 //
11269 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..29
11270 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
11271 // CHECK13-NEXT:  entry:
11272 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11273 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11274 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
11275 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
11276 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11277 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11278 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11279 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11280 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11281 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11282 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
11283 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
11284 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11285 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11286 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
11287 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
11288 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
11289 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
11290 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11291 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
11292 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11293 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11294 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11295 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
11296 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11297 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11298 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
11299 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11300 // CHECK13:       cond.true:
11301 // CHECK13-NEXT:    br label [[COND_END:%.*]]
11302 // CHECK13:       cond.false:
11303 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11304 // CHECK13-NEXT:    br label [[COND_END]]
11305 // CHECK13:       cond.end:
11306 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
11307 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11308 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11309 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
11310 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11311 // CHECK13:       omp.inner.for.cond:
11312 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
11313 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
11314 // CHECK13-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
11315 // CHECK13-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11316 // CHECK13:       omp.inner.for.body:
11317 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !70
11318 // CHECK13-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
11319 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
11320 // CHECK13-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
11321 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !70
11322 // CHECK13-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
11323 // CHECK13-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70
11324 // CHECK13-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70
11325 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70
11326 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11327 // CHECK13:       omp.inner.for.inc:
11328 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
11329 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !70
11330 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
11331 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
11332 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
11333 // CHECK13:       omp.inner.for.end:
11334 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11335 // CHECK13:       omp.loop.exit:
11336 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
11337 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11338 // CHECK13-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
11339 // CHECK13-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11340 // CHECK13:       .omp.final.then:
11341 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
11342 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11343 // CHECK13:       .omp.final.done:
11344 // CHECK13-NEXT:    ret void
11345 //
11346 //
11347 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30
11348 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
11349 // CHECK13-NEXT:  entry:
11350 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11351 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11352 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11353 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11354 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
11355 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
11356 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11357 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11358 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11359 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11360 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11361 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11362 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
11363 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11364 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11365 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11366 // CHECK13-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11367 // CHECK13-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
11368 // CHECK13-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
11369 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
11370 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
11371 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11372 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11373 // CHECK13-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11374 // CHECK13-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
11375 // CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11376 // CHECK13-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
11377 // CHECK13-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
11378 // CHECK13-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
11379 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11380 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11381 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
11382 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11383 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11384 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11385 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
11386 // CHECK13-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
11387 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11388 // CHECK13:       omp.dispatch.cond:
11389 // CHECK13-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
11390 // CHECK13-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
11391 // CHECK13-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11392 // CHECK13:       omp.dispatch.body:
11393 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11394 // CHECK13-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
11395 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11396 // CHECK13:       omp.inner.for.cond:
11397 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
11398 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !73
11399 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
11400 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11401 // CHECK13:       omp.inner.for.body:
11402 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
11403 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
11404 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11405 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !73
11406 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !73
11407 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
11408 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
11409 // CHECK13-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !73
11410 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11411 // CHECK13:       omp.body.continue:
11412 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11413 // CHECK13:       omp.inner.for.inc:
11414 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
11415 // CHECK13-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
11416 // CHECK13-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
11417 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
11418 // CHECK13:       omp.inner.for.end:
11419 // CHECK13-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11420 // CHECK13:       omp.dispatch.inc:
11421 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND]]
11422 // CHECK13:       omp.dispatch.end:
11423 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11424 // CHECK13-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
11425 // CHECK13-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11426 // CHECK13:       .omp.final.then:
11427 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
11428 // CHECK13-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11429 // CHECK13:       .omp.final.done:
11430 // CHECK13-NEXT:    ret void
11431 //
11432 //
11433 // CHECK13-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
11434 // CHECK13-SAME: () #[[ATTR5:[0-9]+]] {
11435 // CHECK13-NEXT:  entry:
11436 // CHECK13-NEXT:    call void @__tgt_register_requires(i64 1)
11437 // CHECK13-NEXT:    ret void
11438 //
11439 //
11440 // CHECK14-LABEL: define {{[^@]+}}@main
11441 // CHECK14-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
11442 // CHECK14-NEXT:  entry:
11443 // CHECK14-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
11444 // CHECK14-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
11445 // CHECK14-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
11446 // CHECK14-NEXT:    [[N:%.*]] = alloca i32, align 4
11447 // CHECK14-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
11448 // CHECK14-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
11449 // CHECK14-NEXT:    [[M:%.*]] = alloca i32, align 4
11450 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
11451 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
11452 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
11453 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
11454 // CHECK14-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
11455 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11456 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11457 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11458 // CHECK14-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
11459 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
11460 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
11461 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
11462 // CHECK14-NEXT:    [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
11463 // CHECK14-NEXT:    [[_TMP9:%.*]] = alloca i32, align 4
11464 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
11465 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
11466 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
11467 // CHECK14-NEXT:    [[N_CASTED19:%.*]] = alloca i64, align 8
11468 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
11469 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
11470 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
11471 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
11472 // CHECK14-NEXT:    [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
11473 // CHECK14-NEXT:    [[_TMP26:%.*]] = alloca i32, align 4
11474 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
11475 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
11476 // CHECK14-NEXT:    [[N_CASTED35:%.*]] = alloca i64, align 8
11477 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
11478 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
11479 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
11480 // CHECK14-NEXT:    [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
11481 // CHECK14-NEXT:    [[_TMP41:%.*]] = alloca i32, align 4
11482 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
11483 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
11484 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
11485 // CHECK14-NEXT:    [[N_CASTED51:%.*]] = alloca i64, align 8
11486 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
11487 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
11488 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
11489 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
11490 // CHECK14-NEXT:    [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
11491 // CHECK14-NEXT:    [[_TMP59:%.*]] = alloca i32, align 4
11492 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
11493 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
11494 // CHECK14-NEXT:    store i32 0, i32* [[RETVAL]], align 4
11495 // CHECK14-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
11496 // CHECK14-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
11497 // CHECK14-NEXT:    store i32 100, i32* [[N]], align 4
11498 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
11499 // CHECK14-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
11500 // CHECK14-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
11501 // CHECK14-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
11502 // CHECK14-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
11503 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
11504 // CHECK14-NEXT:    store i32 10, i32* [[M]], align 4
11505 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N]], align 4
11506 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11507 // CHECK14-NEXT:    store i32 [[TMP3]], i32* [[CONV]], align 4
11508 // CHECK14-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
11509 // CHECK14-NEXT:    [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
11510 // CHECK14-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11511 // CHECK14-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
11512 // CHECK14-NEXT:    store i64 [[TMP4]], i64* [[TMP7]], align 8
11513 // CHECK14-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11514 // CHECK14-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
11515 // CHECK14-NEXT:    store i64 [[TMP4]], i64* [[TMP9]], align 8
11516 // CHECK14-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
11517 // CHECK14-NEXT:    store i64 4, i64* [[TMP10]], align 8
11518 // CHECK14-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
11519 // CHECK14-NEXT:    store i8* null, i8** [[TMP11]], align 8
11520 // CHECK14-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
11521 // CHECK14-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
11522 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP13]], align 8
11523 // CHECK14-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11524 // CHECK14-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
11525 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP15]], align 8
11526 // CHECK14-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
11527 // CHECK14-NEXT:    store i64 8, i64* [[TMP16]], align 8
11528 // CHECK14-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
11529 // CHECK14-NEXT:    store i8* null, i8** [[TMP17]], align 8
11530 // CHECK14-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
11531 // CHECK14-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
11532 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 8
11533 // CHECK14-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
11534 // CHECK14-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
11535 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 8
11536 // CHECK14-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
11537 // CHECK14-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
11538 // CHECK14-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
11539 // CHECK14-NEXT:    store i8* null, i8** [[TMP23]], align 8
11540 // CHECK14-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11541 // CHECK14-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11542 // CHECK14-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
11543 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
11544 // CHECK14-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
11545 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11546 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
11547 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11548 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11549 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11550 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11551 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
11552 // CHECK14-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
11553 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
11554 // CHECK14-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
11555 // CHECK14-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11556 // CHECK14-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
11557 // CHECK14:       omp_offload.failed:
11558 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
11559 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT]]
11560 // CHECK14:       omp_offload.cont:
11561 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
11562 // CHECK14-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
11563 // CHECK14-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
11564 // CHECK14-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
11565 // CHECK14-NEXT:    [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
11566 // CHECK14-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
11567 // CHECK14-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
11568 // CHECK14-NEXT:    store i64 [[TMP34]], i64* [[TMP37]], align 8
11569 // CHECK14-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
11570 // CHECK14-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
11571 // CHECK14-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
11572 // CHECK14-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
11573 // CHECK14-NEXT:    store i64 4, i64* [[TMP40]], align 8
11574 // CHECK14-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
11575 // CHECK14-NEXT:    store i8* null, i8** [[TMP41]], align 8
11576 // CHECK14-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
11577 // CHECK14-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
11578 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP43]], align 8
11579 // CHECK14-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
11580 // CHECK14-NEXT:    [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
11581 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP45]], align 8
11582 // CHECK14-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
11583 // CHECK14-NEXT:    store i64 8, i64* [[TMP46]], align 8
11584 // CHECK14-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
11585 // CHECK14-NEXT:    store i8* null, i8** [[TMP47]], align 8
11586 // CHECK14-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
11587 // CHECK14-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
11588 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP49]], align 8
11589 // CHECK14-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
11590 // CHECK14-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
11591 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP51]], align 8
11592 // CHECK14-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
11593 // CHECK14-NEXT:    store i64 [[TMP35]], i64* [[TMP52]], align 8
11594 // CHECK14-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
11595 // CHECK14-NEXT:    store i8* null, i8** [[TMP53]], align 8
11596 // CHECK14-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
11597 // CHECK14-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
11598 // CHECK14-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
11599 // CHECK14-NEXT:    [[TMP57:%.*]] = load i32, i32* [[N]], align 4
11600 // CHECK14-NEXT:    store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
11601 // CHECK14-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
11602 // CHECK14-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
11603 // CHECK14-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
11604 // CHECK14-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
11605 // CHECK14-NEXT:    store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
11606 // CHECK14-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
11607 // CHECK14-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
11608 // CHECK14-NEXT:    [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
11609 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
11610 // CHECK14-NEXT:    [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
11611 // CHECK14-NEXT:    [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
11612 // CHECK14-NEXT:    br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
11613 // CHECK14:       omp_offload.failed16:
11614 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
11615 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
11616 // CHECK14:       omp_offload.cont17:
11617 // CHECK14-NEXT:    [[TMP63:%.*]] = load i32, i32* [[M]], align 4
11618 // CHECK14-NEXT:    store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
11619 // CHECK14-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N]], align 4
11620 // CHECK14-NEXT:    [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
11621 // CHECK14-NEXT:    store i32 [[TMP64]], i32* [[CONV20]], align 4
11622 // CHECK14-NEXT:    [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
11623 // CHECK14-NEXT:    [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
11624 // CHECK14-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
11625 // CHECK14-NEXT:    store i32 [[TMP66]], i32* [[CONV21]], align 4
11626 // CHECK14-NEXT:    [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
11627 // CHECK14-NEXT:    [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
11628 // CHECK14-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
11629 // CHECK14-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
11630 // CHECK14-NEXT:    store i64 [[TMP65]], i64* [[TMP70]], align 8
11631 // CHECK14-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
11632 // CHECK14-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
11633 // CHECK14-NEXT:    store i64 [[TMP65]], i64* [[TMP72]], align 8
11634 // CHECK14-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
11635 // CHECK14-NEXT:    store i64 4, i64* [[TMP73]], align 8
11636 // CHECK14-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
11637 // CHECK14-NEXT:    store i8* null, i8** [[TMP74]], align 8
11638 // CHECK14-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
11639 // CHECK14-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
11640 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP76]], align 8
11641 // CHECK14-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
11642 // CHECK14-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
11643 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP78]], align 8
11644 // CHECK14-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
11645 // CHECK14-NEXT:    store i64 8, i64* [[TMP79]], align 8
11646 // CHECK14-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
11647 // CHECK14-NEXT:    store i8* null, i8** [[TMP80]], align 8
11648 // CHECK14-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
11649 // CHECK14-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
11650 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP82]], align 8
11651 // CHECK14-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
11652 // CHECK14-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
11653 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 8
11654 // CHECK14-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
11655 // CHECK14-NEXT:    store i64 [[TMP68]], i64* [[TMP85]], align 8
11656 // CHECK14-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
11657 // CHECK14-NEXT:    store i8* null, i8** [[TMP86]], align 8
11658 // CHECK14-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
11659 // CHECK14-NEXT:    [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
11660 // CHECK14-NEXT:    store i64 [[TMP67]], i64* [[TMP88]], align 8
11661 // CHECK14-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
11662 // CHECK14-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
11663 // CHECK14-NEXT:    store i64 [[TMP67]], i64* [[TMP90]], align 8
11664 // CHECK14-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
11665 // CHECK14-NEXT:    store i64 4, i64* [[TMP91]], align 8
11666 // CHECK14-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
11667 // CHECK14-NEXT:    store i8* null, i8** [[TMP92]], align 8
11668 // CHECK14-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
11669 // CHECK14-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
11670 // CHECK14-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
11671 // CHECK14-NEXT:    [[TMP96:%.*]] = load i32, i32* [[N]], align 4
11672 // CHECK14-NEXT:    store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
11673 // CHECK14-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
11674 // CHECK14-NEXT:    [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
11675 // CHECK14-NEXT:    [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
11676 // CHECK14-NEXT:    [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
11677 // CHECK14-NEXT:    store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
11678 // CHECK14-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
11679 // CHECK14-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
11680 // CHECK14-NEXT:    [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
11681 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
11682 // CHECK14-NEXT:    [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
11683 // CHECK14-NEXT:    [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
11684 // CHECK14-NEXT:    br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
11685 // CHECK14:       omp_offload.failed33:
11686 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
11687 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT34]]
11688 // CHECK14:       omp_offload.cont34:
11689 // CHECK14-NEXT:    [[TMP102:%.*]] = load i32, i32* [[N]], align 4
11690 // CHECK14-NEXT:    [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
11691 // CHECK14-NEXT:    store i32 [[TMP102]], i32* [[CONV36]], align 4
11692 // CHECK14-NEXT:    [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
11693 // CHECK14-NEXT:    [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
11694 // CHECK14-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
11695 // CHECK14-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
11696 // CHECK14-NEXT:    store i64 [[TMP103]], i64* [[TMP106]], align 8
11697 // CHECK14-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
11698 // CHECK14-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
11699 // CHECK14-NEXT:    store i64 [[TMP103]], i64* [[TMP108]], align 8
11700 // CHECK14-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
11701 // CHECK14-NEXT:    store i64 4, i64* [[TMP109]], align 8
11702 // CHECK14-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
11703 // CHECK14-NEXT:    store i8* null, i8** [[TMP110]], align 8
11704 // CHECK14-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
11705 // CHECK14-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
11706 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP112]], align 8
11707 // CHECK14-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
11708 // CHECK14-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
11709 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP114]], align 8
11710 // CHECK14-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
11711 // CHECK14-NEXT:    store i64 8, i64* [[TMP115]], align 8
11712 // CHECK14-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
11713 // CHECK14-NEXT:    store i8* null, i8** [[TMP116]], align 8
11714 // CHECK14-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
11715 // CHECK14-NEXT:    [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
11716 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP118]], align 8
11717 // CHECK14-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
11718 // CHECK14-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
11719 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP120]], align 8
11720 // CHECK14-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
11721 // CHECK14-NEXT:    store i64 [[TMP104]], i64* [[TMP121]], align 8
11722 // CHECK14-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
11723 // CHECK14-NEXT:    store i8* null, i8** [[TMP122]], align 8
11724 // CHECK14-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
11725 // CHECK14-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
11726 // CHECK14-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
11727 // CHECK14-NEXT:    [[TMP126:%.*]] = load i32, i32* [[N]], align 4
11728 // CHECK14-NEXT:    store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
11729 // CHECK14-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
11730 // CHECK14-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
11731 // CHECK14-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
11732 // CHECK14-NEXT:    [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
11733 // CHECK14-NEXT:    store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
11734 // CHECK14-NEXT:    [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
11735 // CHECK14-NEXT:    [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
11736 // CHECK14-NEXT:    [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
11737 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
11738 // CHECK14-NEXT:    [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
11739 // CHECK14-NEXT:    [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
11740 // CHECK14-NEXT:    br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
11741 // CHECK14:       omp_offload.failed48:
11742 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
11743 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT49]]
11744 // CHECK14:       omp_offload.cont49:
11745 // CHECK14-NEXT:    [[TMP132:%.*]] = load i32, i32* [[M]], align 4
11746 // CHECK14-NEXT:    store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
11747 // CHECK14-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
11748 // CHECK14-NEXT:    [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
11749 // CHECK14-NEXT:    store i32 [[TMP133]], i32* [[CONV52]], align 4
11750 // CHECK14-NEXT:    [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
11751 // CHECK14-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
11752 // CHECK14-NEXT:    [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
11753 // CHECK14-NEXT:    store i32 [[TMP135]], i32* [[CONV54]], align 4
11754 // CHECK14-NEXT:    [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
11755 // CHECK14-NEXT:    [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
11756 // CHECK14-NEXT:    [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
11757 // CHECK14-NEXT:    [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
11758 // CHECK14-NEXT:    store i64 [[TMP134]], i64* [[TMP139]], align 8
11759 // CHECK14-NEXT:    [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
11760 // CHECK14-NEXT:    [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
11761 // CHECK14-NEXT:    store i64 [[TMP134]], i64* [[TMP141]], align 8
11762 // CHECK14-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
11763 // CHECK14-NEXT:    store i64 4, i64* [[TMP142]], align 8
11764 // CHECK14-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
11765 // CHECK14-NEXT:    store i8* null, i8** [[TMP143]], align 8
11766 // CHECK14-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
11767 // CHECK14-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
11768 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP145]], align 8
11769 // CHECK14-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
11770 // CHECK14-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
11771 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[TMP147]], align 8
11772 // CHECK14-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
11773 // CHECK14-NEXT:    store i64 8, i64* [[TMP148]], align 8
11774 // CHECK14-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
11775 // CHECK14-NEXT:    store i8* null, i8** [[TMP149]], align 8
11776 // CHECK14-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
11777 // CHECK14-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
11778 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP151]], align 8
11779 // CHECK14-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
11780 // CHECK14-NEXT:    [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
11781 // CHECK14-NEXT:    store i32* [[VLA]], i32** [[TMP153]], align 8
11782 // CHECK14-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
11783 // CHECK14-NEXT:    store i64 [[TMP137]], i64* [[TMP154]], align 8
11784 // CHECK14-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
11785 // CHECK14-NEXT:    store i8* null, i8** [[TMP155]], align 8
11786 // CHECK14-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
11787 // CHECK14-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
11788 // CHECK14-NEXT:    store i64 [[TMP136]], i64* [[TMP157]], align 8
11789 // CHECK14-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
11790 // CHECK14-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
11791 // CHECK14-NEXT:    store i64 [[TMP136]], i64* [[TMP159]], align 8
11792 // CHECK14-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
11793 // CHECK14-NEXT:    store i64 4, i64* [[TMP160]], align 8
11794 // CHECK14-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
11795 // CHECK14-NEXT:    store i8* null, i8** [[TMP161]], align 8
11796 // CHECK14-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
11797 // CHECK14-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
11798 // CHECK14-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
11799 // CHECK14-NEXT:    [[TMP165:%.*]] = load i32, i32* [[N]], align 4
11800 // CHECK14-NEXT:    store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
11801 // CHECK14-NEXT:    [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
11802 // CHECK14-NEXT:    [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
11803 // CHECK14-NEXT:    [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
11804 // CHECK14-NEXT:    [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
11805 // CHECK14-NEXT:    store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
11806 // CHECK14-NEXT:    [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
11807 // CHECK14-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
11808 // CHECK14-NEXT:    [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
11809 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
11810 // CHECK14-NEXT:    [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
11811 // CHECK14-NEXT:    [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
11812 // CHECK14-NEXT:    br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
11813 // CHECK14:       omp_offload.failed66:
11814 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
11815 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT67]]
11816 // CHECK14:       omp_offload.cont67:
11817 // CHECK14-NEXT:    [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
11818 // CHECK14-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
11819 // CHECK14-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
11820 // CHECK14-NEXT:    [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
11821 // CHECK14-NEXT:    call void @llvm.stackrestore(i8* [[TMP172]])
11822 // CHECK14-NEXT:    [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
11823 // CHECK14-NEXT:    ret i32 [[TMP173]]
11824 //
11825 //
11826 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
11827 // CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
11828 // CHECK14-NEXT:  entry:
11829 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11830 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
11831 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11832 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
11833 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11834 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
11835 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11836 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11837 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
11838 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
11839 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
11840 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11841 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
11842 // CHECK14-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
11843 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
11844 // CHECK14-NEXT:    ret void
11845 //
11846 //
11847 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined.
11848 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11849 // CHECK14-NEXT:  entry:
11850 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11851 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11852 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11853 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
11854 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11855 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11856 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11857 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11858 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11859 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
11860 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11861 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11862 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11863 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11864 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
11865 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
11866 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11867 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11868 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11869 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
11870 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11871 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11872 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
11873 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
11874 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
11875 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
11876 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11877 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
11878 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11879 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11880 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11881 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
11882 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11883 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
11884 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11885 // CHECK14:       omp.precond.then:
11886 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11887 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11888 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
11889 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11890 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11891 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11892 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
11893 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11894 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11895 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11896 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
11897 // CHECK14-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11898 // CHECK14:       cond.true:
11899 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11900 // CHECK14-NEXT:    br label [[COND_END:%.*]]
11901 // CHECK14:       cond.false:
11902 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11903 // CHECK14-NEXT:    br label [[COND_END]]
11904 // CHECK14:       cond.end:
11905 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
11906 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11907 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11908 // CHECK14-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
11909 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11910 // CHECK14:       omp.inner.for.cond:
11911 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
11912 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
11913 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
11914 // CHECK14-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11915 // CHECK14:       omp.inner.for.body:
11916 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !13
11917 // CHECK14-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
11918 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
11919 // CHECK14-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
11920 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !13
11921 // CHECK14-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11922 // CHECK14-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !13
11923 // CHECK14-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !13
11924 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !13
11925 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11926 // CHECK14:       omp.inner.for.inc:
11927 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
11928 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !13
11929 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
11930 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
11931 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
11932 // CHECK14:       omp.inner.for.end:
11933 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11934 // CHECK14:       omp.loop.exit:
11935 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11936 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
11937 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
11938 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11939 // CHECK14-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
11940 // CHECK14-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11941 // CHECK14:       .omp.final.then:
11942 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11943 // CHECK14-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
11944 // CHECK14-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11945 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
11946 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
11947 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
11948 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11949 // CHECK14:       .omp.final.done:
11950 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
11951 // CHECK14:       omp.precond.end:
11952 // CHECK14-NEXT:    ret void
11953 //
11954 //
11955 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..1
11956 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11957 // CHECK14-NEXT:  entry:
11958 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11959 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11960 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11961 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11962 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11963 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
11964 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11965 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11966 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11967 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11968 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11969 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
11970 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11971 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11972 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11973 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11974 // CHECK14-NEXT:    [[I5:%.*]] = alloca i32, align 4
11975 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11976 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11977 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11978 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11979 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11980 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
11981 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11982 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11983 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
11984 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
11985 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
11986 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
11987 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11988 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
11989 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11990 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11991 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11992 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
11993 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11994 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
11995 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11996 // CHECK14:       omp.precond.then:
11997 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11998 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11999 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
12000 // CHECK14-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12001 // CHECK14-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
12002 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12003 // CHECK14-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
12004 // CHECK14-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
12005 // CHECK14-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
12006 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12007 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12008 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12009 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12010 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12011 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12012 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12013 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12014 // CHECK14-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12015 // CHECK14:       cond.true:
12016 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12017 // CHECK14-NEXT:    br label [[COND_END:%.*]]
12018 // CHECK14:       cond.false:
12019 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12020 // CHECK14-NEXT:    br label [[COND_END]]
12021 // CHECK14:       cond.end:
12022 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12023 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12024 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12025 // CHECK14-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12026 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12027 // CHECK14:       omp.inner.for.cond:
12028 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
12029 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !17
12030 // CHECK14-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12031 // CHECK14-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12032 // CHECK14:       omp.inner.for.body:
12033 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
12034 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
12035 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12036 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !17
12037 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !17
12038 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
12039 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
12040 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !17
12041 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12042 // CHECK14:       omp.body.continue:
12043 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12044 // CHECK14:       omp.inner.for.inc:
12045 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
12046 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
12047 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
12048 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
12049 // CHECK14:       omp.inner.for.end:
12050 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12051 // CHECK14:       omp.loop.exit:
12052 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12053 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
12054 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
12055 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12056 // CHECK14-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
12057 // CHECK14-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12058 // CHECK14:       .omp.final.then:
12059 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12060 // CHECK14-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
12061 // CHECK14-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
12062 // CHECK14-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
12063 // CHECK14-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
12064 // CHECK14-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
12065 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12066 // CHECK14:       .omp.final.done:
12067 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12068 // CHECK14:       omp.precond.end:
12069 // CHECK14-NEXT:    ret void
12070 //
12071 //
12072 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
12073 // CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12074 // CHECK14-NEXT:  entry:
12075 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12076 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12077 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12078 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12079 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12080 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12081 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12082 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12083 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12084 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12085 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12086 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12087 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
12088 // CHECK14-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
12089 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
12090 // CHECK14-NEXT:    ret void
12091 //
12092 //
12093 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..2
12094 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12095 // CHECK14-NEXT:  entry:
12096 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12097 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12098 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12099 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12100 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12101 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12102 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12103 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12104 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12105 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
12106 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12107 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12108 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12109 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12110 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
12111 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12112 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12113 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12114 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12115 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12116 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12117 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12118 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12119 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12120 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12121 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
12122 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12123 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12124 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12125 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12126 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12127 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
12128 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12129 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
12130 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12131 // CHECK14:       omp.precond.then:
12132 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12133 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12134 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
12135 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12136 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12137 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12138 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
12139 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12140 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12141 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12142 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
12143 // CHECK14-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12144 // CHECK14:       cond.true:
12145 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12146 // CHECK14-NEXT:    br label [[COND_END:%.*]]
12147 // CHECK14:       cond.false:
12148 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12149 // CHECK14-NEXT:    br label [[COND_END]]
12150 // CHECK14:       cond.end:
12151 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
12152 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12153 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12154 // CHECK14-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
12155 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12156 // CHECK14:       omp.inner.for.cond:
12157 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
12158 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
12159 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
12160 // CHECK14-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12161 // CHECK14:       omp.inner.for.body:
12162 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !22
12163 // CHECK14-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
12164 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
12165 // CHECK14-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
12166 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !22
12167 // CHECK14-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12168 // CHECK14-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !22
12169 // CHECK14-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !22
12170 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !22
12171 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12172 // CHECK14:       omp.inner.for.inc:
12173 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
12174 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !22
12175 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12176 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
12177 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
12178 // CHECK14:       omp.inner.for.end:
12179 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12180 // CHECK14:       omp.loop.exit:
12181 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12182 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
12183 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
12184 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12185 // CHECK14-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
12186 // CHECK14-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12187 // CHECK14:       .omp.final.then:
12188 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12189 // CHECK14-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
12190 // CHECK14-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
12191 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
12192 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
12193 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
12194 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12195 // CHECK14:       .omp.final.done:
12196 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12197 // CHECK14:       omp.precond.end:
12198 // CHECK14-NEXT:    ret void
12199 //
12200 //
12201 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..3
12202 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12203 // CHECK14-NEXT:  entry:
12204 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12205 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12206 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12207 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12208 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12209 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12210 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12211 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12212 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12213 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12214 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12215 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
12216 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12217 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12218 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12219 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12220 // CHECK14-NEXT:    [[I5:%.*]] = alloca i32, align 4
12221 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12222 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12223 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12224 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12225 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12226 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12227 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12228 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12229 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12230 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12231 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12232 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
12233 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12234 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12235 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12236 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12237 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12238 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
12239 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12240 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
12241 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12242 // CHECK14:       omp.precond.then:
12243 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12244 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12245 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
12246 // CHECK14-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12247 // CHECK14-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
12248 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12249 // CHECK14-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
12250 // CHECK14-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
12251 // CHECK14-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
12252 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12253 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12254 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12255 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12256 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12257 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12258 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12259 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12260 // CHECK14-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12261 // CHECK14:       cond.true:
12262 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12263 // CHECK14-NEXT:    br label [[COND_END:%.*]]
12264 // CHECK14:       cond.false:
12265 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12266 // CHECK14-NEXT:    br label [[COND_END]]
12267 // CHECK14:       cond.end:
12268 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12269 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12270 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12271 // CHECK14-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12272 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12273 // CHECK14:       omp.inner.for.cond:
12274 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
12275 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
12276 // CHECK14-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12277 // CHECK14-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12278 // CHECK14:       omp.inner.for.body:
12279 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
12280 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
12281 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12282 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !25
12283 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !25
12284 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
12285 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
12286 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
12287 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12288 // CHECK14:       omp.body.continue:
12289 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12290 // CHECK14:       omp.inner.for.inc:
12291 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
12292 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
12293 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
12294 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
12295 // CHECK14:       omp.inner.for.end:
12296 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12297 // CHECK14:       omp.loop.exit:
12298 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12299 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
12300 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
12301 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12302 // CHECK14-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
12303 // CHECK14-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12304 // CHECK14:       .omp.final.then:
12305 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12306 // CHECK14-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
12307 // CHECK14-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
12308 // CHECK14-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
12309 // CHECK14-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
12310 // CHECK14-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
12311 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12312 // CHECK14:       .omp.final.done:
12313 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12314 // CHECK14:       omp.precond.end:
12315 // CHECK14-NEXT:    ret void
12316 //
12317 //
12318 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
12319 // CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12320 // CHECK14-NEXT:  entry:
12321 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12322 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12323 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12324 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
12325 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12326 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
12327 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12328 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12329 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12330 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
12331 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12332 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12333 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12334 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
12335 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12336 // CHECK14-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12337 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
12338 // CHECK14-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
12339 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
12340 // CHECK14-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
12341 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
12342 // CHECK14-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
12343 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
12344 // CHECK14-NEXT:    ret void
12345 //
12346 //
12347 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..5
12348 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12349 // CHECK14-NEXT:  entry:
12350 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12351 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12352 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12353 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12354 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12355 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
12356 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12357 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12358 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12359 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
12360 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
12361 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12362 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12363 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12364 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12365 // CHECK14-NEXT:    [[I5:%.*]] = alloca i32, align 4
12366 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12367 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
12368 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12369 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12370 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12371 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12372 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12373 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
12374 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12375 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12376 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12377 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
12378 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12379 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12380 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12381 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12382 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12383 // CHECK14-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
12384 // CHECK14-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
12385 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
12386 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12387 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
12388 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12389 // CHECK14:       omp.precond.then:
12390 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12391 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12392 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
12393 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12394 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12395 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
12396 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12397 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
12398 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
12399 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12400 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12401 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
12402 // CHECK14-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12403 // CHECK14:       cond.true:
12404 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12405 // CHECK14-NEXT:    br label [[COND_END:%.*]]
12406 // CHECK14:       cond.false:
12407 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12408 // CHECK14-NEXT:    br label [[COND_END]]
12409 // CHECK14:       cond.end:
12410 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
12411 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12412 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12413 // CHECK14-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
12414 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12415 // CHECK14:       omp.inner.for.cond:
12416 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
12417 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
12418 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
12419 // CHECK14-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
12420 // CHECK14-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12421 // CHECK14:       omp.inner.for.body:
12422 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
12423 // CHECK14-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
12424 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
12425 // CHECK14-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
12426 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !28
12427 // CHECK14-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12428 // CHECK14-NEXT:    store i32 [[TMP20]], i32* [[CONV8]], align 4, !llvm.access.group !28
12429 // CHECK14-NEXT:    [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !28
12430 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !28
12431 // CHECK14-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
12432 // CHECK14-NEXT:    store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28
12433 // CHECK14-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28
12434 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28
12435 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12436 // CHECK14:       omp.inner.for.inc:
12437 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
12438 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
12439 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
12440 // CHECK14-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
12441 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
12442 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
12443 // CHECK14-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
12444 // CHECK14-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
12445 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
12446 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
12447 // CHECK14-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
12448 // CHECK14-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
12449 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
12450 // CHECK14-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
12451 // CHECK14-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
12452 // CHECK14-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
12453 // CHECK14:       cond.true14:
12454 // CHECK14-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
12455 // CHECK14-NEXT:    br label [[COND_END16:%.*]]
12456 // CHECK14:       cond.false15:
12457 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
12458 // CHECK14-NEXT:    br label [[COND_END16]]
12459 // CHECK14:       cond.end16:
12460 // CHECK14-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
12461 // CHECK14-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
12462 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
12463 // CHECK14-NEXT:    store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
12464 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
12465 // CHECK14:       omp.inner.for.end:
12466 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12467 // CHECK14:       omp.loop.exit:
12468 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12469 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
12470 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
12471 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12472 // CHECK14-NEXT:    [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0
12473 // CHECK14-NEXT:    br i1 [[TMP38]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12474 // CHECK14:       .omp.final.then:
12475 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12476 // CHECK14-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP39]], 0
12477 // CHECK14-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
12478 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV19]], 1
12479 // CHECK14-NEXT:    [[ADD20:%.*]] = add nsw i32 0, [[MUL]]
12480 // CHECK14-NEXT:    store i32 [[ADD20]], i32* [[I5]], align 4
12481 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12482 // CHECK14:       .omp.final.done:
12483 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12484 // CHECK14:       omp.precond.end:
12485 // CHECK14-NEXT:    ret void
12486 //
12487 //
12488 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6
12489 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12490 // CHECK14-NEXT:  entry:
12491 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12492 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12493 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12494 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12495 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12496 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12497 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12498 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
12499 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12500 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12501 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12502 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
12503 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
12504 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12505 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12506 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12507 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12508 // CHECK14-NEXT:    [[I7:%.*]] = alloca i32, align 4
12509 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12510 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12511 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12512 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12513 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12514 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12515 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12516 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
12517 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12518 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12519 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12520 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
12521 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12522 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12523 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12524 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12525 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12526 // CHECK14-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
12527 // CHECK14-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
12528 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
12529 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12530 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
12531 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12532 // CHECK14:       omp.precond.then:
12533 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12534 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12535 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
12536 // CHECK14-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12537 // CHECK14-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
12538 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12539 // CHECK14-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
12540 // CHECK14-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
12541 // CHECK14-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
12542 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12543 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12544 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12545 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12546 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12547 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12548 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12549 // CHECK14-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12550 // CHECK14-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12551 // CHECK14:       cond.true:
12552 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12553 // CHECK14-NEXT:    br label [[COND_END:%.*]]
12554 // CHECK14:       cond.false:
12555 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12556 // CHECK14-NEXT:    br label [[COND_END]]
12557 // CHECK14:       cond.end:
12558 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12559 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12560 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12561 // CHECK14-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12562 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12563 // CHECK14:       omp.inner.for.cond:
12564 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
12565 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
12566 // CHECK14-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12567 // CHECK14-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12568 // CHECK14:       omp.inner.for.body:
12569 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
12570 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
12571 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12572 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !31
12573 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !31
12574 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
12575 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
12576 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !31
12577 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12578 // CHECK14:       omp.body.continue:
12579 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12580 // CHECK14:       omp.inner.for.inc:
12581 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
12582 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
12583 // CHECK14-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
12584 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
12585 // CHECK14:       omp.inner.for.end:
12586 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12587 // CHECK14:       omp.loop.exit:
12588 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12589 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
12590 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
12591 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12592 // CHECK14-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
12593 // CHECK14-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12594 // CHECK14:       .omp.final.then:
12595 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12596 // CHECK14-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP24]], 0
12597 // CHECK14-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
12598 // CHECK14-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
12599 // CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
12600 // CHECK14-NEXT:    store i32 [[ADD14]], i32* [[I7]], align 4
12601 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12602 // CHECK14:       .omp.final.done:
12603 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12604 // CHECK14:       omp.precond.end:
12605 // CHECK14-NEXT:    ret void
12606 //
12607 //
12608 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
12609 // CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12610 // CHECK14-NEXT:  entry:
12611 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12612 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12613 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12614 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12615 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12616 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12617 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12618 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12619 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12620 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12621 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12622 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12623 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
12624 // CHECK14-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
12625 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
12626 // CHECK14-NEXT:    ret void
12627 //
12628 //
12629 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..8
12630 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12631 // CHECK14-NEXT:  entry:
12632 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12633 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12634 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12635 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12636 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12637 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12638 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12639 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12640 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12641 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
12642 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12643 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12644 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12645 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12646 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
12647 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12648 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12649 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12650 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12651 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12652 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12653 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12654 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12655 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12656 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12657 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
12658 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12659 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12660 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12661 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12662 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12663 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
12664 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12665 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
12666 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12667 // CHECK14:       omp.precond.then:
12668 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12669 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12670 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
12671 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12672 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12673 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12674 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
12675 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12676 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12677 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12678 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
12679 // CHECK14-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12680 // CHECK14:       cond.true:
12681 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12682 // CHECK14-NEXT:    br label [[COND_END:%.*]]
12683 // CHECK14:       cond.false:
12684 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12685 // CHECK14-NEXT:    br label [[COND_END]]
12686 // CHECK14:       cond.end:
12687 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
12688 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12689 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12690 // CHECK14-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
12691 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12692 // CHECK14:       omp.inner.for.cond:
12693 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
12694 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
12695 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
12696 // CHECK14-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12697 // CHECK14:       omp.inner.for.body:
12698 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !34
12699 // CHECK14-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
12700 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
12701 // CHECK14-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
12702 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !34
12703 // CHECK14-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12704 // CHECK14-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34
12705 // CHECK14-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34
12706 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34
12707 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12708 // CHECK14:       omp.inner.for.inc:
12709 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
12710 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !34
12711 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12712 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
12713 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
12714 // CHECK14:       omp.inner.for.end:
12715 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12716 // CHECK14:       omp.loop.exit:
12717 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12718 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
12719 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
12720 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12721 // CHECK14-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
12722 // CHECK14-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12723 // CHECK14:       .omp.final.then:
12724 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12725 // CHECK14-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
12726 // CHECK14-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
12727 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
12728 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
12729 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
12730 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12731 // CHECK14:       .omp.final.done:
12732 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12733 // CHECK14:       omp.precond.end:
12734 // CHECK14-NEXT:    ret void
12735 //
12736 //
12737 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..9
12738 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12739 // CHECK14-NEXT:  entry:
12740 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12741 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12742 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12743 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12744 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12745 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12746 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12747 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12748 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12749 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12750 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12751 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
12752 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12753 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12754 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12755 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12756 // CHECK14-NEXT:    [[I5:%.*]] = alloca i32, align 4
12757 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12758 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12759 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12760 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12761 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12762 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12763 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12764 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12765 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12766 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12767 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12768 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
12769 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12770 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12771 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12772 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12773 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12774 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
12775 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12776 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
12777 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12778 // CHECK14:       omp.precond.then:
12779 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12780 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12781 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
12782 // CHECK14-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12783 // CHECK14-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
12784 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12785 // CHECK14-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
12786 // CHECK14-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
12787 // CHECK14-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
12788 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12789 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12790 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12791 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12792 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12793 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12794 // CHECK14-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
12795 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
12796 // CHECK14:       omp.dispatch.cond:
12797 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12798 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
12799 // CHECK14-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
12800 // CHECK14-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
12801 // CHECK14-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12802 // CHECK14:       omp.dispatch.body:
12803 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12804 // CHECK14-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
12805 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12806 // CHECK14:       omp.inner.for.cond:
12807 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
12808 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !37
12809 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
12810 // CHECK14-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12811 // CHECK14:       omp.inner.for.body:
12812 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
12813 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
12814 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12815 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !37
12816 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !37
12817 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
12818 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
12819 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !37
12820 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12821 // CHECK14:       omp.body.continue:
12822 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12823 // CHECK14:       omp.inner.for.inc:
12824 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
12825 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
12826 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
12827 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
12828 // CHECK14:       omp.inner.for.end:
12829 // CHECK14-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
12830 // CHECK14:       omp.dispatch.inc:
12831 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND]]
12832 // CHECK14:       omp.dispatch.end:
12833 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12834 // CHECK14-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
12835 // CHECK14-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12836 // CHECK14:       .omp.final.then:
12837 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12838 // CHECK14-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP23]], 0
12839 // CHECK14-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
12840 // CHECK14-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
12841 // CHECK14-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
12842 // CHECK14-NEXT:    store i32 [[ADD11]], i32* [[I5]], align 4
12843 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12844 // CHECK14:       .omp.final.done:
12845 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12846 // CHECK14:       omp.precond.end:
12847 // CHECK14-NEXT:    ret void
12848 //
12849 //
12850 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
12851 // CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12852 // CHECK14-NEXT:  entry:
12853 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12854 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12855 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12856 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
12857 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12858 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
12859 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12860 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12861 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12862 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
12863 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12864 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12865 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12866 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
12867 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12868 // CHECK14-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12869 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
12870 // CHECK14-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
12871 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
12872 // CHECK14-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
12873 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
12874 // CHECK14-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
12875 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
12876 // CHECK14-NEXT:    ret void
12877 //
12878 //
12879 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11
12880 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12881 // CHECK14-NEXT:  entry:
12882 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12883 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12884 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12885 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
12886 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12887 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
12888 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12889 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12890 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12891 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
12892 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
12893 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12894 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12895 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12896 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12897 // CHECK14-NEXT:    [[I5:%.*]] = alloca i32, align 4
12898 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12899 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
12900 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12901 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12902 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12903 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
12904 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12905 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
12906 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12907 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
12908 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
12909 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
12910 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12911 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12912 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12913 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12914 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12915 // CHECK14-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
12916 // CHECK14-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
12917 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
12918 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12919 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
12920 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12921 // CHECK14:       omp.precond.then:
12922 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12923 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12924 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
12925 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12926 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12927 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12928 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
12929 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12930 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12931 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12932 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
12933 // CHECK14-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12934 // CHECK14:       cond.true:
12935 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12936 // CHECK14-NEXT:    br label [[COND_END:%.*]]
12937 // CHECK14:       cond.false:
12938 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12939 // CHECK14-NEXT:    br label [[COND_END]]
12940 // CHECK14:       cond.end:
12941 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
12942 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12943 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12944 // CHECK14-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
12945 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12946 // CHECK14:       omp.inner.for.cond:
12947 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
12948 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
12949 // CHECK14-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
12950 // CHECK14-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12951 // CHECK14:       omp.inner.for.body:
12952 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !40
12953 // CHECK14-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
12954 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
12955 // CHECK14-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
12956 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !40
12957 // CHECK14-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12958 // CHECK14-NEXT:    store i32 [[TMP19]], i32* [[CONV8]], align 4, !llvm.access.group !40
12959 // CHECK14-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !40
12960 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !40
12961 // CHECK14-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
12962 // CHECK14-NEXT:    store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40
12963 // CHECK14-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40
12964 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40
12965 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12966 // CHECK14:       omp.inner.for.inc:
12967 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
12968 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !40
12969 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
12970 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
12971 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
12972 // CHECK14:       omp.inner.for.end:
12973 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12974 // CHECK14:       omp.loop.exit:
12975 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12976 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
12977 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
12978 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12979 // CHECK14-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
12980 // CHECK14-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12981 // CHECK14:       .omp.final.then:
12982 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12983 // CHECK14-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP29]], 0
12984 // CHECK14-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
12985 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV11]], 1
12986 // CHECK14-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL]]
12987 // CHECK14-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
12988 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12989 // CHECK14:       .omp.final.done:
12990 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
12991 // CHECK14:       omp.precond.end:
12992 // CHECK14-NEXT:    ret void
12993 //
12994 //
12995 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..12
12996 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12997 // CHECK14-NEXT:  entry:
12998 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12999 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13000 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13001 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13002 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
13003 // CHECK14-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
13004 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
13005 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13006 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13007 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13008 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13009 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
13010 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13011 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13012 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13013 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13014 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13015 // CHECK14-NEXT:    [[I7:%.*]] = alloca i32, align 4
13016 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13017 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13018 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13019 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13020 // CHECK14-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
13021 // CHECK14-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
13022 // CHECK14-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
13023 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13024 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
13025 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
13026 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
13027 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13028 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
13029 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13030 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13031 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
13032 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13033 // CHECK14-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
13034 // CHECK14-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
13035 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
13036 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13037 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
13038 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13039 // CHECK14:       omp.precond.then:
13040 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13041 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
13042 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
13043 // CHECK14-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13044 // CHECK14-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
13045 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13046 // CHECK14-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
13047 // CHECK14-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
13048 // CHECK14-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
13049 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13050 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13051 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
13052 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13053 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13054 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13055 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
13056 // CHECK14-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
13057 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13058 // CHECK14:       omp.dispatch.cond:
13059 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13060 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
13061 // CHECK14-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13062 // CHECK14-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
13063 // CHECK14-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13064 // CHECK14:       omp.dispatch.body:
13065 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13066 // CHECK14-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13067 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13068 // CHECK14:       omp.inner.for.cond:
13069 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
13070 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43
13071 // CHECK14-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13072 // CHECK14-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13073 // CHECK14:       omp.inner.for.body:
13074 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
13075 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
13076 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13077 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !43
13078 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !43
13079 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
13080 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
13081 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !43
13082 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13083 // CHECK14:       omp.body.continue:
13084 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13085 // CHECK14:       omp.inner.for.inc:
13086 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
13087 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
13088 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
13089 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
13090 // CHECK14:       omp.inner.for.end:
13091 // CHECK14-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13092 // CHECK14:       omp.dispatch.inc:
13093 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND]]
13094 // CHECK14:       omp.dispatch.end:
13095 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13096 // CHECK14-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
13097 // CHECK14-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13098 // CHECK14:       .omp.final.then:
13099 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13100 // CHECK14-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP24]], 0
13101 // CHECK14-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
13102 // CHECK14-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
13103 // CHECK14-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
13104 // CHECK14-NEXT:    store i32 [[ADD13]], i32* [[I7]], align 4
13105 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13106 // CHECK14:       .omp.final.done:
13107 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
13108 // CHECK14:       omp.precond.end:
13109 // CHECK14-NEXT:    ret void
13110 //
13111 //
13112 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
13113 // CHECK14-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
13114 // CHECK14-NEXT:  entry:
13115 // CHECK14-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
13116 // CHECK14-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
13117 // CHECK14-NEXT:    [[M:%.*]] = alloca i32, align 4
13118 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
13119 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
13120 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
13121 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13122 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
13123 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
13124 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
13125 // CHECK14-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
13126 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13127 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
13128 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
13129 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
13130 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
13131 // CHECK14-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
13132 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
13133 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
13134 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
13135 // CHECK14-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
13136 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
13137 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
13138 // CHECK14-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
13139 // CHECK14-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
13140 // CHECK14-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
13141 // CHECK14-NEXT:    [[_TMP25:%.*]] = alloca i32, align 4
13142 // CHECK14-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
13143 // CHECK14-NEXT:    store i32 10, i32* [[M]], align 4
13144 // CHECK14-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13145 // CHECK14-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
13146 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
13147 // CHECK14-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13148 // CHECK14-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
13149 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
13150 // CHECK14-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
13151 // CHECK14-NEXT:    store i8* null, i8** [[TMP4]], align 8
13152 // CHECK14-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13153 // CHECK14-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13154 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
13155 // CHECK14-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
13156 // CHECK14-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
13157 // CHECK14-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13158 // CHECK14:       omp_offload.failed:
13159 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
13160 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT]]
13161 // CHECK14:       omp_offload.cont:
13162 // CHECK14-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
13163 // CHECK14-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
13164 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
13165 // CHECK14-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
13166 // CHECK14-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
13167 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
13168 // CHECK14-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
13169 // CHECK14-NEXT:    store i8* null, i8** [[TMP13]], align 8
13170 // CHECK14-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
13171 // CHECK14-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
13172 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
13173 // CHECK14-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
13174 // CHECK14-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
13175 // CHECK14-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
13176 // CHECK14:       omp_offload.failed5:
13177 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
13178 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
13179 // CHECK14:       omp_offload.cont6:
13180 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
13181 // CHECK14-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
13182 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13183 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
13184 // CHECK14-NEXT:    store i32 [[TMP19]], i32* [[CONV]], align 4
13185 // CHECK14-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
13186 // CHECK14-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
13187 // CHECK14-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
13188 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
13189 // CHECK14-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
13190 // CHECK14-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
13191 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
13192 // CHECK14-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
13193 // CHECK14-NEXT:    store i8* null, i8** [[TMP25]], align 8
13194 // CHECK14-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
13195 // CHECK14-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
13196 // CHECK14-NEXT:    store i64 [[TMP20]], i64* [[TMP27]], align 8
13197 // CHECK14-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
13198 // CHECK14-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
13199 // CHECK14-NEXT:    store i64 [[TMP20]], i64* [[TMP29]], align 8
13200 // CHECK14-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
13201 // CHECK14-NEXT:    store i8* null, i8** [[TMP30]], align 8
13202 // CHECK14-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
13203 // CHECK14-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
13204 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
13205 // CHECK14-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
13206 // CHECK14-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
13207 // CHECK14-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
13208 // CHECK14:       omp_offload.failed11:
13209 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
13210 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
13211 // CHECK14:       omp_offload.cont12:
13212 // CHECK14-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
13213 // CHECK14-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
13214 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
13215 // CHECK14-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
13216 // CHECK14-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
13217 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
13218 // CHECK14-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
13219 // CHECK14-NEXT:    store i8* null, i8** [[TMP39]], align 8
13220 // CHECK14-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
13221 // CHECK14-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
13222 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
13223 // CHECK14-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
13224 // CHECK14-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
13225 // CHECK14-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
13226 // CHECK14:       omp_offload.failed17:
13227 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
13228 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
13229 // CHECK14:       omp_offload.cont18:
13230 // CHECK14-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
13231 // CHECK14-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
13232 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
13233 // CHECK14-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
13234 // CHECK14-NEXT:    store i32 [[TMP45]], i32* [[CONV21]], align 4
13235 // CHECK14-NEXT:    [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
13236 // CHECK14-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
13237 // CHECK14-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
13238 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
13239 // CHECK14-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
13240 // CHECK14-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
13241 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
13242 // CHECK14-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
13243 // CHECK14-NEXT:    store i8* null, i8** [[TMP51]], align 8
13244 // CHECK14-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
13245 // CHECK14-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
13246 // CHECK14-NEXT:    store i64 [[TMP46]], i64* [[TMP53]], align 8
13247 // CHECK14-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
13248 // CHECK14-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
13249 // CHECK14-NEXT:    store i64 [[TMP46]], i64* [[TMP55]], align 8
13250 // CHECK14-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
13251 // CHECK14-NEXT:    store i8* null, i8** [[TMP56]], align 8
13252 // CHECK14-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
13253 // CHECK14-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
13254 // CHECK14-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
13255 // CHECK14-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
13256 // CHECK14-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
13257 // CHECK14-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
13258 // CHECK14:       omp_offload.failed26:
13259 // CHECK14-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
13260 // CHECK14-NEXT:    br label [[OMP_OFFLOAD_CONT27]]
13261 // CHECK14:       omp_offload.cont27:
13262 // CHECK14-NEXT:    ret i32 0
13263 //
13264 //
13265 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
13266 // CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13267 // CHECK14-NEXT:  entry:
13268 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13269 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13270 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13271 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
13272 // CHECK14-NEXT:    ret void
13273 //
13274 //
13275 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14
13276 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13277 // CHECK14-NEXT:  entry:
13278 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13279 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13280 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13281 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13282 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13283 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13284 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13285 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13286 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13287 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13288 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13289 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13290 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13291 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13292 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13293 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
13294 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13295 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13296 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13297 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
13298 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13299 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13300 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13301 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13302 // CHECK14:       cond.true:
13303 // CHECK14-NEXT:    br label [[COND_END:%.*]]
13304 // CHECK14:       cond.false:
13305 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13306 // CHECK14-NEXT:    br label [[COND_END]]
13307 // CHECK14:       cond.end:
13308 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13309 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13310 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13311 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13312 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13313 // CHECK14:       omp.inner.for.cond:
13314 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
13315 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
13316 // CHECK14-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13317 // CHECK14-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13318 // CHECK14:       omp.inner.for.body:
13319 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !46
13320 // CHECK14-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
13321 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
13322 // CHECK14-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
13323 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46
13324 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13325 // CHECK14:       omp.inner.for.inc:
13326 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
13327 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !46
13328 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
13329 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
13330 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
13331 // CHECK14:       omp.inner.for.end:
13332 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13333 // CHECK14:       omp.loop.exit:
13334 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
13335 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13336 // CHECK14-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13337 // CHECK14-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13338 // CHECK14:       .omp.final.then:
13339 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13340 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13341 // CHECK14:       .omp.final.done:
13342 // CHECK14-NEXT:    ret void
13343 //
13344 //
13345 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15
13346 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13347 // CHECK14-NEXT:  entry:
13348 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13349 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13350 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13351 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13352 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13353 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13354 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13355 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13356 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13357 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13358 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13359 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13360 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13361 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13362 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13363 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13364 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13365 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13366 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13367 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13368 // CHECK14-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13369 // CHECK14-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
13370 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13371 // CHECK14-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
13372 // CHECK14-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
13373 // CHECK14-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
13374 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13375 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13376 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13377 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
13378 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13379 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13380 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
13381 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13382 // CHECK14:       cond.true:
13383 // CHECK14-NEXT:    br label [[COND_END:%.*]]
13384 // CHECK14:       cond.false:
13385 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13386 // CHECK14-NEXT:    br label [[COND_END]]
13387 // CHECK14:       cond.end:
13388 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
13389 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13390 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13391 // CHECK14-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
13392 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13393 // CHECK14:       omp.inner.for.cond:
13394 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
13395 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !49
13396 // CHECK14-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
13397 // CHECK14-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13398 // CHECK14:       omp.inner.for.body:
13399 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
13400 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
13401 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13402 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !49
13403 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !49
13404 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
13405 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
13406 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !49
13407 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13408 // CHECK14:       omp.body.continue:
13409 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13410 // CHECK14:       omp.inner.for.inc:
13411 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
13412 // CHECK14-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
13413 // CHECK14-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
13414 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
13415 // CHECK14:       omp.inner.for.end:
13416 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13417 // CHECK14:       omp.loop.exit:
13418 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
13419 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13420 // CHECK14-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
13421 // CHECK14-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13422 // CHECK14:       .omp.final.then:
13423 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13424 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13425 // CHECK14:       .omp.final.done:
13426 // CHECK14-NEXT:    ret void
13427 //
13428 //
13429 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
13430 // CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13431 // CHECK14-NEXT:  entry:
13432 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13433 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13434 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13435 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
13436 // CHECK14-NEXT:    ret void
13437 //
13438 //
13439 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..17
13440 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13441 // CHECK14-NEXT:  entry:
13442 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13443 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13444 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13445 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13446 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13447 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13448 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13449 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13450 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13451 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13452 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13453 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13454 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13455 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13456 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13457 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
13458 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13459 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13460 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13461 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
13462 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13463 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13464 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13465 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13466 // CHECK14:       cond.true:
13467 // CHECK14-NEXT:    br label [[COND_END:%.*]]
13468 // CHECK14:       cond.false:
13469 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13470 // CHECK14-NEXT:    br label [[COND_END]]
13471 // CHECK14:       cond.end:
13472 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13473 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13474 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13475 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13476 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13477 // CHECK14:       omp.inner.for.cond:
13478 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
13479 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
13480 // CHECK14-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13481 // CHECK14-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13482 // CHECK14:       omp.inner.for.body:
13483 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !52
13484 // CHECK14-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
13485 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
13486 // CHECK14-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
13487 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52
13488 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13489 // CHECK14:       omp.inner.for.inc:
13490 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
13491 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !52
13492 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
13493 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
13494 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
13495 // CHECK14:       omp.inner.for.end:
13496 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13497 // CHECK14:       omp.loop.exit:
13498 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
13499 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13500 // CHECK14-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13501 // CHECK14-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13502 // CHECK14:       .omp.final.then:
13503 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13504 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13505 // CHECK14:       .omp.final.done:
13506 // CHECK14-NEXT:    ret void
13507 //
13508 //
13509 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18
13510 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13511 // CHECK14-NEXT:  entry:
13512 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13513 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13514 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13515 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13516 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13517 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13518 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13519 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13520 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13521 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13522 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13523 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13524 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13525 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13526 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13527 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13528 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13529 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13530 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13531 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13532 // CHECK14-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13533 // CHECK14-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
13534 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13535 // CHECK14-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
13536 // CHECK14-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
13537 // CHECK14-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
13538 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13539 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13540 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13541 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
13542 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13543 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13544 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
13545 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13546 // CHECK14:       cond.true:
13547 // CHECK14-NEXT:    br label [[COND_END:%.*]]
13548 // CHECK14:       cond.false:
13549 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13550 // CHECK14-NEXT:    br label [[COND_END]]
13551 // CHECK14:       cond.end:
13552 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
13553 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13554 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13555 // CHECK14-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
13556 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13557 // CHECK14:       omp.inner.for.cond:
13558 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
13559 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !55
13560 // CHECK14-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
13561 // CHECK14-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13562 // CHECK14:       omp.inner.for.body:
13563 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
13564 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
13565 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13566 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !55
13567 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !55
13568 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
13569 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
13570 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !55
13571 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13572 // CHECK14:       omp.body.continue:
13573 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13574 // CHECK14:       omp.inner.for.inc:
13575 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
13576 // CHECK14-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
13577 // CHECK14-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
13578 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
13579 // CHECK14:       omp.inner.for.end:
13580 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13581 // CHECK14:       omp.loop.exit:
13582 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
13583 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13584 // CHECK14-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
13585 // CHECK14-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13586 // CHECK14:       .omp.final.then:
13587 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13588 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13589 // CHECK14:       .omp.final.done:
13590 // CHECK14-NEXT:    ret void
13591 //
13592 //
13593 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
13594 // CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13595 // CHECK14-NEXT:  entry:
13596 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13597 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13598 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
13599 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13600 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13601 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13602 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13603 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
13604 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
13605 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
13606 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
13607 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
13608 // CHECK14-NEXT:    ret void
13609 //
13610 //
13611 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..21
13612 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13613 // CHECK14-NEXT:  entry:
13614 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13615 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13616 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13617 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13618 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13619 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13620 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13621 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13622 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13623 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13624 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13625 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
13626 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13627 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13628 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13629 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13630 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13631 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13632 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13633 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
13634 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13635 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13636 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13637 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
13638 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13639 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13640 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13641 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13642 // CHECK14:       cond.true:
13643 // CHECK14-NEXT:    br label [[COND_END:%.*]]
13644 // CHECK14:       cond.false:
13645 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13646 // CHECK14-NEXT:    br label [[COND_END]]
13647 // CHECK14:       cond.end:
13648 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13649 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13650 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13651 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13652 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13653 // CHECK14:       omp.inner.for.cond:
13654 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
13655 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
13656 // CHECK14-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13657 // CHECK14-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13658 // CHECK14:       omp.inner.for.body:
13659 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !58
13660 // CHECK14-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
13661 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
13662 // CHECK14-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
13663 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !58
13664 // CHECK14-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
13665 // CHECK14-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58
13666 // CHECK14-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58
13667 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58
13668 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13669 // CHECK14:       omp.inner.for.inc:
13670 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
13671 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !58
13672 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
13673 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
13674 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
13675 // CHECK14:       omp.inner.for.end:
13676 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13677 // CHECK14:       omp.loop.exit:
13678 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
13679 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13680 // CHECK14-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
13681 // CHECK14-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13682 // CHECK14:       .omp.final.then:
13683 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13684 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13685 // CHECK14:       .omp.final.done:
13686 // CHECK14-NEXT:    ret void
13687 //
13688 //
13689 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22
13690 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13691 // CHECK14-NEXT:  entry:
13692 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13693 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13694 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13695 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13696 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13697 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13698 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13699 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13700 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13701 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13702 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13703 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13704 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13705 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13706 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13707 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13708 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13709 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13710 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13711 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13712 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13713 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13714 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13715 // CHECK14-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13716 // CHECK14-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
13717 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13718 // CHECK14-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
13719 // CHECK14-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
13720 // CHECK14-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
13721 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13722 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13723 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
13724 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13725 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
13726 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
13727 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13728 // CHECK14:       omp.dispatch.cond:
13729 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13730 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13731 // CHECK14-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
13732 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
13733 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13734 // CHECK14:       cond.true:
13735 // CHECK14-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13736 // CHECK14-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
13737 // CHECK14-NEXT:    br label [[COND_END:%.*]]
13738 // CHECK14:       cond.false:
13739 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13740 // CHECK14-NEXT:    br label [[COND_END]]
13741 // CHECK14:       cond.end:
13742 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
13743 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13744 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13745 // CHECK14-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
13746 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13747 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13748 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
13749 // CHECK14-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13750 // CHECK14:       omp.dispatch.body:
13751 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13752 // CHECK14:       omp.inner.for.cond:
13753 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
13754 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !61
13755 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
13756 // CHECK14-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13757 // CHECK14:       omp.inner.for.body:
13758 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
13759 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
13760 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13761 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !61
13762 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !61
13763 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
13764 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
13765 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !61
13766 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13767 // CHECK14:       omp.body.continue:
13768 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13769 // CHECK14:       omp.inner.for.inc:
13770 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
13771 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
13772 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
13773 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
13774 // CHECK14:       omp.inner.for.end:
13775 // CHECK14-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13776 // CHECK14:       omp.dispatch.inc:
13777 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13778 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13779 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
13780 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
13781 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13782 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13783 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
13784 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
13785 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND]]
13786 // CHECK14:       omp.dispatch.end:
13787 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
13788 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13789 // CHECK14-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
13790 // CHECK14-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13791 // CHECK14:       .omp.final.then:
13792 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13793 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13794 // CHECK14:       .omp.final.done:
13795 // CHECK14-NEXT:    ret void
13796 //
13797 //
13798 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
13799 // CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13800 // CHECK14-NEXT:  entry:
13801 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13802 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13803 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13804 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
13805 // CHECK14-NEXT:    ret void
13806 //
13807 //
13808 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..25
13809 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13810 // CHECK14-NEXT:  entry:
13811 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13812 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13813 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13814 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13815 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13816 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13817 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13818 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13819 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13820 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13821 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13822 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13823 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13824 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13825 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13826 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
13827 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13828 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13829 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13830 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
13831 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13832 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13833 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13834 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13835 // CHECK14:       cond.true:
13836 // CHECK14-NEXT:    br label [[COND_END:%.*]]
13837 // CHECK14:       cond.false:
13838 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13839 // CHECK14-NEXT:    br label [[COND_END]]
13840 // CHECK14:       cond.end:
13841 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13842 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13843 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13844 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13845 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13846 // CHECK14:       omp.inner.for.cond:
13847 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
13848 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
13849 // CHECK14-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13850 // CHECK14-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13851 // CHECK14:       omp.inner.for.body:
13852 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !64
13853 // CHECK14-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
13854 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
13855 // CHECK14-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
13856 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64
13857 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13858 // CHECK14:       omp.inner.for.inc:
13859 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
13860 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !64
13861 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
13862 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
13863 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
13864 // CHECK14:       omp.inner.for.end:
13865 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13866 // CHECK14:       omp.loop.exit:
13867 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
13868 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13869 // CHECK14-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13870 // CHECK14-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13871 // CHECK14:       .omp.final.then:
13872 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13873 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13874 // CHECK14:       .omp.final.done:
13875 // CHECK14-NEXT:    ret void
13876 //
13877 //
13878 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26
13879 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13880 // CHECK14-NEXT:  entry:
13881 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13882 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13883 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13884 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13885 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13886 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13887 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13888 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13889 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13890 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13891 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13892 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13893 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13894 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13895 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13896 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13897 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13898 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13899 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13900 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13901 // CHECK14-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13902 // CHECK14-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
13903 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13904 // CHECK14-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
13905 // CHECK14-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
13906 // CHECK14-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
13907 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13908 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13909 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13910 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13911 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13912 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
13913 // CHECK14-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
13914 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13915 // CHECK14:       omp.dispatch.cond:
13916 // CHECK14-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13917 // CHECK14-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
13918 // CHECK14-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13919 // CHECK14:       omp.dispatch.body:
13920 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13921 // CHECK14-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
13922 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13923 // CHECK14:       omp.inner.for.cond:
13924 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
13925 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !67
13926 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
13927 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13928 // CHECK14:       omp.inner.for.body:
13929 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
13930 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
13931 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13932 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !67
13933 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !67
13934 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
13935 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
13936 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !67
13937 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13938 // CHECK14:       omp.body.continue:
13939 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13940 // CHECK14:       omp.inner.for.inc:
13941 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
13942 // CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
13943 // CHECK14-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
13944 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
13945 // CHECK14:       omp.inner.for.end:
13946 // CHECK14-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13947 // CHECK14:       omp.dispatch.inc:
13948 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND]]
13949 // CHECK14:       omp.dispatch.end:
13950 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13951 // CHECK14-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13952 // CHECK14-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13953 // CHECK14:       .omp.final.then:
13954 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
13955 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13956 // CHECK14:       .omp.final.done:
13957 // CHECK14-NEXT:    ret void
13958 //
13959 //
13960 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
13961 // CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13962 // CHECK14-NEXT:  entry:
13963 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13964 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13965 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
13966 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13967 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13968 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13969 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13970 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
13971 // CHECK14-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
13972 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
13973 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
13974 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
13975 // CHECK14-NEXT:    ret void
13976 //
13977 //
13978 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..29
13979 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13980 // CHECK14-NEXT:  entry:
13981 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13982 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13983 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
13984 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13985 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13986 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13987 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13988 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13989 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13990 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13991 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13992 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
13993 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13994 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13995 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
13996 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13997 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
13998 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13999 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14000 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
14001 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14002 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14003 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14004 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
14005 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14006 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14007 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
14008 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14009 // CHECK14:       cond.true:
14010 // CHECK14-NEXT:    br label [[COND_END:%.*]]
14011 // CHECK14:       cond.false:
14012 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14013 // CHECK14-NEXT:    br label [[COND_END]]
14014 // CHECK14:       cond.end:
14015 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
14016 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14017 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14018 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
14019 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14020 // CHECK14:       omp.inner.for.cond:
14021 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
14022 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
14023 // CHECK14-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
14024 // CHECK14-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14025 // CHECK14:       omp.inner.for.body:
14026 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !70
14027 // CHECK14-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
14028 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
14029 // CHECK14-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
14030 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !70
14031 // CHECK14-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
14032 // CHECK14-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70
14033 // CHECK14-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70
14034 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70
14035 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14036 // CHECK14:       omp.inner.for.inc:
14037 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
14038 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !70
14039 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
14040 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
14041 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
14042 // CHECK14:       omp.inner.for.end:
14043 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14044 // CHECK14:       omp.loop.exit:
14045 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
14046 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14047 // CHECK14-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
14048 // CHECK14-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14049 // CHECK14:       .omp.final.then:
14050 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
14051 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14052 // CHECK14:       .omp.final.done:
14053 // CHECK14-NEXT:    ret void
14054 //
14055 //
14056 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30
14057 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
14058 // CHECK14-NEXT:  entry:
14059 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14060 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14061 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
14062 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
14063 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
14064 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
14065 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14066 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14067 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14068 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14069 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14070 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14071 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14072 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14073 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14074 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14075 // CHECK14-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14076 // CHECK14-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
14077 // CHECK14-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
14078 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
14079 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
14080 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14081 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
14082 // CHECK14-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14083 // CHECK14-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
14084 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14085 // CHECK14-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
14086 // CHECK14-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
14087 // CHECK14-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
14088 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14089 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14090 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
14091 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14092 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14093 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14094 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
14095 // CHECK14-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
14096 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
14097 // CHECK14:       omp.dispatch.cond:
14098 // CHECK14-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
14099 // CHECK14-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
14100 // CHECK14-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14101 // CHECK14:       omp.dispatch.body:
14102 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14103 // CHECK14-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
14104 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14105 // CHECK14:       omp.inner.for.cond:
14106 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
14107 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !73
14108 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
14109 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14110 // CHECK14:       omp.inner.for.body:
14111 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
14112 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
14113 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14114 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !73
14115 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !73
14116 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
14117 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
14118 // CHECK14-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !73
14119 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14120 // CHECK14:       omp.body.continue:
14121 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14122 // CHECK14:       omp.inner.for.inc:
14123 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
14124 // CHECK14-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
14125 // CHECK14-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
14126 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
14127 // CHECK14:       omp.inner.for.end:
14128 // CHECK14-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
14129 // CHECK14:       omp.dispatch.inc:
14130 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND]]
14131 // CHECK14:       omp.dispatch.end:
14132 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14133 // CHECK14-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
14134 // CHECK14-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14135 // CHECK14:       .omp.final.then:
14136 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
14137 // CHECK14-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14138 // CHECK14:       .omp.final.done:
14139 // CHECK14-NEXT:    ret void
14140 //
14141 //
14142 // CHECK14-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
14143 // CHECK14-SAME: () #[[ATTR5:[0-9]+]] {
14144 // CHECK14-NEXT:  entry:
14145 // CHECK14-NEXT:    call void @__tgt_register_requires(i64 1)
14146 // CHECK14-NEXT:    ret void
14147 //
14148 //
14149 // CHECK15-LABEL: define {{[^@]+}}@main
14150 // CHECK15-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
14151 // CHECK15-NEXT:  entry:
14152 // CHECK15-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
14153 // CHECK15-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
14154 // CHECK15-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 4
14155 // CHECK15-NEXT:    [[N:%.*]] = alloca i32, align 4
14156 // CHECK15-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
14157 // CHECK15-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
14158 // CHECK15-NEXT:    [[M:%.*]] = alloca i32, align 4
14159 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14160 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
14161 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
14162 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
14163 // CHECK15-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
14164 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14165 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14166 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14167 // CHECK15-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
14168 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
14169 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
14170 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
14171 // CHECK15-NEXT:    [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
14172 // CHECK15-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
14173 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
14174 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
14175 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
14176 // CHECK15-NEXT:    [[N_CASTED18:%.*]] = alloca i32, align 4
14177 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14178 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
14179 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
14180 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
14181 // CHECK15-NEXT:    [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
14182 // CHECK15-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
14183 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
14184 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
14185 // CHECK15-NEXT:    [[N_CASTED32:%.*]] = alloca i32, align 4
14186 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
14187 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
14188 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
14189 // CHECK15-NEXT:    [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
14190 // CHECK15-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
14191 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
14192 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
14193 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
14194 // CHECK15-NEXT:    [[N_CASTED47:%.*]] = alloca i32, align 4
14195 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
14196 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
14197 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
14198 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
14199 // CHECK15-NEXT:    [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
14200 // CHECK15-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
14201 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
14202 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
14203 // CHECK15-NEXT:    store i32 0, i32* [[RETVAL]], align 4
14204 // CHECK15-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
14205 // CHECK15-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
14206 // CHECK15-NEXT:    store i32 100, i32* [[N]], align 4
14207 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
14208 // CHECK15-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
14209 // CHECK15-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
14210 // CHECK15-NEXT:    [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
14211 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
14212 // CHECK15-NEXT:    store i32 10, i32* [[M]], align 4
14213 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N]], align 4
14214 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
14215 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
14216 // CHECK15-NEXT:    [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
14217 // CHECK15-NEXT:    [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
14218 // CHECK15-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
14219 // CHECK15-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
14220 // CHECK15-NEXT:    store i32 [[TMP3]], i32* [[TMP7]], align 4
14221 // CHECK15-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
14222 // CHECK15-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
14223 // CHECK15-NEXT:    store i32 [[TMP3]], i32* [[TMP9]], align 4
14224 // CHECK15-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
14225 // CHECK15-NEXT:    store i64 4, i64* [[TMP10]], align 4
14226 // CHECK15-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
14227 // CHECK15-NEXT:    store i8* null, i8** [[TMP11]], align 4
14228 // CHECK15-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
14229 // CHECK15-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
14230 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP13]], align 4
14231 // CHECK15-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
14232 // CHECK15-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
14233 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP15]], align 4
14234 // CHECK15-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
14235 // CHECK15-NEXT:    store i64 4, i64* [[TMP16]], align 4
14236 // CHECK15-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
14237 // CHECK15-NEXT:    store i8* null, i8** [[TMP17]], align 4
14238 // CHECK15-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
14239 // CHECK15-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
14240 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 4
14241 // CHECK15-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
14242 // CHECK15-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
14243 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 4
14244 // CHECK15-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
14245 // CHECK15-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 4
14246 // CHECK15-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
14247 // CHECK15-NEXT:    store i8* null, i8** [[TMP23]], align 4
14248 // CHECK15-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
14249 // CHECK15-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
14250 // CHECK15-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
14251 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
14252 // CHECK15-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
14253 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14254 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
14255 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14256 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14257 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14258 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14259 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
14260 // CHECK15-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
14261 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
14262 // CHECK15-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
14263 // CHECK15-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14264 // CHECK15-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
14265 // CHECK15:       omp_offload.failed:
14266 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
14267 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT]]
14268 // CHECK15:       omp_offload.cont:
14269 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
14270 // CHECK15-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
14271 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
14272 // CHECK15-NEXT:    [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
14273 // CHECK15-NEXT:    [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
14274 // CHECK15-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
14275 // CHECK15-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
14276 // CHECK15-NEXT:    store i32 [[TMP34]], i32* [[TMP38]], align 4
14277 // CHECK15-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
14278 // CHECK15-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
14279 // CHECK15-NEXT:    store i32 [[TMP34]], i32* [[TMP40]], align 4
14280 // CHECK15-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
14281 // CHECK15-NEXT:    store i64 4, i64* [[TMP41]], align 4
14282 // CHECK15-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
14283 // CHECK15-NEXT:    store i8* null, i8** [[TMP42]], align 4
14284 // CHECK15-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
14285 // CHECK15-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
14286 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP44]], align 4
14287 // CHECK15-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
14288 // CHECK15-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
14289 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP46]], align 4
14290 // CHECK15-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
14291 // CHECK15-NEXT:    store i64 4, i64* [[TMP47]], align 4
14292 // CHECK15-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
14293 // CHECK15-NEXT:    store i8* null, i8** [[TMP48]], align 4
14294 // CHECK15-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
14295 // CHECK15-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
14296 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP50]], align 4
14297 // CHECK15-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
14298 // CHECK15-NEXT:    [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
14299 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP52]], align 4
14300 // CHECK15-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
14301 // CHECK15-NEXT:    store i64 [[TMP36]], i64* [[TMP53]], align 4
14302 // CHECK15-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
14303 // CHECK15-NEXT:    store i8* null, i8** [[TMP54]], align 4
14304 // CHECK15-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
14305 // CHECK15-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
14306 // CHECK15-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
14307 // CHECK15-NEXT:    [[TMP58:%.*]] = load i32, i32* [[N]], align 4
14308 // CHECK15-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
14309 // CHECK15-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
14310 // CHECK15-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
14311 // CHECK15-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
14312 // CHECK15-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
14313 // CHECK15-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
14314 // CHECK15-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
14315 // CHECK15-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
14316 // CHECK15-NEXT:    [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
14317 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
14318 // CHECK15-NEXT:    [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
14319 // CHECK15-NEXT:    [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
14320 // CHECK15-NEXT:    br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
14321 // CHECK15:       omp_offload.failed15:
14322 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
14323 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
14324 // CHECK15:       omp_offload.cont16:
14325 // CHECK15-NEXT:    [[TMP64:%.*]] = load i32, i32* [[M]], align 4
14326 // CHECK15-NEXT:    store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
14327 // CHECK15-NEXT:    [[TMP65:%.*]] = load i32, i32* [[N]], align 4
14328 // CHECK15-NEXT:    store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
14329 // CHECK15-NEXT:    [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
14330 // CHECK15-NEXT:    [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
14331 // CHECK15-NEXT:    store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
14332 // CHECK15-NEXT:    [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
14333 // CHECK15-NEXT:    [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
14334 // CHECK15-NEXT:    [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
14335 // CHECK15-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
14336 // CHECK15-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
14337 // CHECK15-NEXT:    store i32 [[TMP66]], i32* [[TMP72]], align 4
14338 // CHECK15-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
14339 // CHECK15-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
14340 // CHECK15-NEXT:    store i32 [[TMP66]], i32* [[TMP74]], align 4
14341 // CHECK15-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
14342 // CHECK15-NEXT:    store i64 4, i64* [[TMP75]], align 4
14343 // CHECK15-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
14344 // CHECK15-NEXT:    store i8* null, i8** [[TMP76]], align 4
14345 // CHECK15-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
14346 // CHECK15-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
14347 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP78]], align 4
14348 // CHECK15-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
14349 // CHECK15-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
14350 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP80]], align 4
14351 // CHECK15-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
14352 // CHECK15-NEXT:    store i64 4, i64* [[TMP81]], align 4
14353 // CHECK15-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
14354 // CHECK15-NEXT:    store i8* null, i8** [[TMP82]], align 4
14355 // CHECK15-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
14356 // CHECK15-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
14357 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 4
14358 // CHECK15-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
14359 // CHECK15-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
14360 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP86]], align 4
14361 // CHECK15-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
14362 // CHECK15-NEXT:    store i64 [[TMP70]], i64* [[TMP87]], align 4
14363 // CHECK15-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
14364 // CHECK15-NEXT:    store i8* null, i8** [[TMP88]], align 4
14365 // CHECK15-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
14366 // CHECK15-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
14367 // CHECK15-NEXT:    store i32 [[TMP68]], i32* [[TMP90]], align 4
14368 // CHECK15-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
14369 // CHECK15-NEXT:    [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
14370 // CHECK15-NEXT:    store i32 [[TMP68]], i32* [[TMP92]], align 4
14371 // CHECK15-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
14372 // CHECK15-NEXT:    store i64 4, i64* [[TMP93]], align 4
14373 // CHECK15-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
14374 // CHECK15-NEXT:    store i8* null, i8** [[TMP94]], align 4
14375 // CHECK15-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
14376 // CHECK15-NEXT:    [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
14377 // CHECK15-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
14378 // CHECK15-NEXT:    [[TMP98:%.*]] = load i32, i32* [[N]], align 4
14379 // CHECK15-NEXT:    store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
14380 // CHECK15-NEXT:    [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
14381 // CHECK15-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
14382 // CHECK15-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
14383 // CHECK15-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
14384 // CHECK15-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
14385 // CHECK15-NEXT:    [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
14386 // CHECK15-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
14387 // CHECK15-NEXT:    [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
14388 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
14389 // CHECK15-NEXT:    [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
14390 // CHECK15-NEXT:    [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
14391 // CHECK15-NEXT:    br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
14392 // CHECK15:       omp_offload.failed30:
14393 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
14394 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
14395 // CHECK15:       omp_offload.cont31:
14396 // CHECK15-NEXT:    [[TMP104:%.*]] = load i32, i32* [[N]], align 4
14397 // CHECK15-NEXT:    store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
14398 // CHECK15-NEXT:    [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
14399 // CHECK15-NEXT:    [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
14400 // CHECK15-NEXT:    [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
14401 // CHECK15-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
14402 // CHECK15-NEXT:    [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
14403 // CHECK15-NEXT:    store i32 [[TMP105]], i32* [[TMP109]], align 4
14404 // CHECK15-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
14405 // CHECK15-NEXT:    [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
14406 // CHECK15-NEXT:    store i32 [[TMP105]], i32* [[TMP111]], align 4
14407 // CHECK15-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
14408 // CHECK15-NEXT:    store i64 4, i64* [[TMP112]], align 4
14409 // CHECK15-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
14410 // CHECK15-NEXT:    store i8* null, i8** [[TMP113]], align 4
14411 // CHECK15-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
14412 // CHECK15-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
14413 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP115]], align 4
14414 // CHECK15-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
14415 // CHECK15-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
14416 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP117]], align 4
14417 // CHECK15-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
14418 // CHECK15-NEXT:    store i64 4, i64* [[TMP118]], align 4
14419 // CHECK15-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
14420 // CHECK15-NEXT:    store i8* null, i8** [[TMP119]], align 4
14421 // CHECK15-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
14422 // CHECK15-NEXT:    [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
14423 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP121]], align 4
14424 // CHECK15-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
14425 // CHECK15-NEXT:    [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
14426 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP123]], align 4
14427 // CHECK15-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
14428 // CHECK15-NEXT:    store i64 [[TMP107]], i64* [[TMP124]], align 4
14429 // CHECK15-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
14430 // CHECK15-NEXT:    store i8* null, i8** [[TMP125]], align 4
14431 // CHECK15-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
14432 // CHECK15-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
14433 // CHECK15-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
14434 // CHECK15-NEXT:    [[TMP129:%.*]] = load i32, i32* [[N]], align 4
14435 // CHECK15-NEXT:    store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
14436 // CHECK15-NEXT:    [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
14437 // CHECK15-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
14438 // CHECK15-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
14439 // CHECK15-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
14440 // CHECK15-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
14441 // CHECK15-NEXT:    [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
14442 // CHECK15-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
14443 // CHECK15-NEXT:    [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
14444 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
14445 // CHECK15-NEXT:    [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
14446 // CHECK15-NEXT:    [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
14447 // CHECK15-NEXT:    br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
14448 // CHECK15:       omp_offload.failed44:
14449 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
14450 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
14451 // CHECK15:       omp_offload.cont45:
14452 // CHECK15-NEXT:    [[TMP135:%.*]] = load i32, i32* [[M]], align 4
14453 // CHECK15-NEXT:    store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
14454 // CHECK15-NEXT:    [[TMP136:%.*]] = load i32, i32* [[N]], align 4
14455 // CHECK15-NEXT:    store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
14456 // CHECK15-NEXT:    [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
14457 // CHECK15-NEXT:    [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
14458 // CHECK15-NEXT:    store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
14459 // CHECK15-NEXT:    [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
14460 // CHECK15-NEXT:    [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
14461 // CHECK15-NEXT:    [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
14462 // CHECK15-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
14463 // CHECK15-NEXT:    [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
14464 // CHECK15-NEXT:    store i32 [[TMP137]], i32* [[TMP143]], align 4
14465 // CHECK15-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
14466 // CHECK15-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
14467 // CHECK15-NEXT:    store i32 [[TMP137]], i32* [[TMP145]], align 4
14468 // CHECK15-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
14469 // CHECK15-NEXT:    store i64 4, i64* [[TMP146]], align 4
14470 // CHECK15-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
14471 // CHECK15-NEXT:    store i8* null, i8** [[TMP147]], align 4
14472 // CHECK15-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
14473 // CHECK15-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
14474 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP149]], align 4
14475 // CHECK15-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
14476 // CHECK15-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
14477 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[TMP151]], align 4
14478 // CHECK15-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
14479 // CHECK15-NEXT:    store i64 4, i64* [[TMP152]], align 4
14480 // CHECK15-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
14481 // CHECK15-NEXT:    store i8* null, i8** [[TMP153]], align 4
14482 // CHECK15-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
14483 // CHECK15-NEXT:    [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
14484 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP155]], align 4
14485 // CHECK15-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
14486 // CHECK15-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
14487 // CHECK15-NEXT:    store i32* [[VLA]], i32** [[TMP157]], align 4
14488 // CHECK15-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
14489 // CHECK15-NEXT:    store i64 [[TMP141]], i64* [[TMP158]], align 4
14490 // CHECK15-NEXT:    [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
14491 // CHECK15-NEXT:    store i8* null, i8** [[TMP159]], align 4
14492 // CHECK15-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
14493 // CHECK15-NEXT:    [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
14494 // CHECK15-NEXT:    store i32 [[TMP139]], i32* [[TMP161]], align 4
14495 // CHECK15-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
14496 // CHECK15-NEXT:    [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
14497 // CHECK15-NEXT:    store i32 [[TMP139]], i32* [[TMP163]], align 4
14498 // CHECK15-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
14499 // CHECK15-NEXT:    store i64 4, i64* [[TMP164]], align 4
14500 // CHECK15-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
14501 // CHECK15-NEXT:    store i8* null, i8** [[TMP165]], align 4
14502 // CHECK15-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
14503 // CHECK15-NEXT:    [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
14504 // CHECK15-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
14505 // CHECK15-NEXT:    [[TMP169:%.*]] = load i32, i32* [[N]], align 4
14506 // CHECK15-NEXT:    store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
14507 // CHECK15-NEXT:    [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
14508 // CHECK15-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
14509 // CHECK15-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
14510 // CHECK15-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
14511 // CHECK15-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
14512 // CHECK15-NEXT:    [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
14513 // CHECK15-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
14514 // CHECK15-NEXT:    [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
14515 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
14516 // CHECK15-NEXT:    [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
14517 // CHECK15-NEXT:    [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
14518 // CHECK15-NEXT:    br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
14519 // CHECK15:       omp_offload.failed60:
14520 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
14521 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
14522 // CHECK15:       omp_offload.cont61:
14523 // CHECK15-NEXT:    [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
14524 // CHECK15-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
14525 // CHECK15-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
14526 // CHECK15-NEXT:    [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
14527 // CHECK15-NEXT:    call void @llvm.stackrestore(i8* [[TMP176]])
14528 // CHECK15-NEXT:    [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
14529 // CHECK15-NEXT:    ret i32 [[TMP177]]
14530 //
14531 //
14532 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
14533 // CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
14534 // CHECK15-NEXT:  entry:
14535 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14536 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
14537 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
14538 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14539 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14540 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
14541 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
14542 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
14543 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
14544 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14545 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
14546 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
14547 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
14548 // CHECK15-NEXT:    ret void
14549 //
14550 //
14551 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined.
14552 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14553 // CHECK15-NEXT:  entry:
14554 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14555 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14556 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14557 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
14558 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
14559 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14560 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14561 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14562 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14563 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
14564 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14565 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14566 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14567 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14568 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
14569 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14570 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14571 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14572 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14573 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
14574 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
14575 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
14576 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
14577 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14578 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
14579 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14580 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
14581 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14582 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14583 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14584 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
14585 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14586 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
14587 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14588 // CHECK15:       omp.precond.then:
14589 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14590 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14591 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
14592 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14593 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14594 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14595 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
14596 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14597 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14598 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14599 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
14600 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14601 // CHECK15:       cond.true:
14602 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14603 // CHECK15-NEXT:    br label [[COND_END:%.*]]
14604 // CHECK15:       cond.false:
14605 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14606 // CHECK15-NEXT:    br label [[COND_END]]
14607 // CHECK15:       cond.end:
14608 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
14609 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14610 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14611 // CHECK15-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
14612 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14613 // CHECK15:       omp.inner.for.cond:
14614 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
14615 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
14616 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
14617 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14618 // CHECK15:       omp.inner.for.body:
14619 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14
14620 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
14621 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !14
14622 // CHECK15-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !14
14623 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !14
14624 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !14
14625 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14626 // CHECK15:       omp.inner.for.inc:
14627 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
14628 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14
14629 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
14630 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
14631 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
14632 // CHECK15:       omp.inner.for.end:
14633 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14634 // CHECK15:       omp.loop.exit:
14635 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14636 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
14637 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
14638 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14639 // CHECK15-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14640 // CHECK15-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14641 // CHECK15:       .omp.final.then:
14642 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14643 // CHECK15-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
14644 // CHECK15-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14645 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14646 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14647 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
14648 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14649 // CHECK15:       .omp.final.done:
14650 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
14651 // CHECK15:       omp.precond.end:
14652 // CHECK15-NEXT:    ret void
14653 //
14654 //
14655 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..1
14656 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14657 // CHECK15-NEXT:  entry:
14658 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14659 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14660 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14661 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14662 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14663 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
14664 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
14665 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14666 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14667 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14668 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14669 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
14670 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14671 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14672 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14673 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14674 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
14675 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14676 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14677 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14678 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14679 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14680 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
14681 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
14682 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
14683 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
14684 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14685 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
14686 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14687 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
14688 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14689 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14690 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14691 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
14692 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14693 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
14694 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14695 // CHECK15:       omp.precond.then:
14696 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14697 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14698 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
14699 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14700 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14701 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
14702 // CHECK15-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14703 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14704 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14705 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14706 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14707 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14708 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14709 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14710 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14711 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14712 // CHECK15:       cond.true:
14713 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14714 // CHECK15-NEXT:    br label [[COND_END:%.*]]
14715 // CHECK15:       cond.false:
14716 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14717 // CHECK15-NEXT:    br label [[COND_END]]
14718 // CHECK15:       cond.end:
14719 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14720 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14721 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14722 // CHECK15-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
14723 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14724 // CHECK15:       omp.inner.for.cond:
14725 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14726 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
14727 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14728 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14729 // CHECK15:       omp.inner.for.body:
14730 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14731 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
14732 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14733 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !18
14734 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !18
14735 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
14736 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
14737 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14738 // CHECK15:       omp.body.continue:
14739 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14740 // CHECK15:       omp.inner.for.inc:
14741 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14742 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
14743 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14744 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
14745 // CHECK15:       omp.inner.for.end:
14746 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14747 // CHECK15:       omp.loop.exit:
14748 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14749 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
14750 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
14751 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14752 // CHECK15-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
14753 // CHECK15-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14754 // CHECK15:       .omp.final.then:
14755 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14756 // CHECK15-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
14757 // CHECK15-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14758 // CHECK15-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
14759 // CHECK15-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
14760 // CHECK15-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
14761 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14762 // CHECK15:       .omp.final.done:
14763 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
14764 // CHECK15:       omp.precond.end:
14765 // CHECK15-NEXT:    ret void
14766 //
14767 //
14768 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
14769 // CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14770 // CHECK15-NEXT:  entry:
14771 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14772 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
14773 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
14774 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14775 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14776 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
14777 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
14778 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
14779 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
14780 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14781 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
14782 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
14783 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
14784 // CHECK15-NEXT:    ret void
14785 //
14786 //
14787 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..2
14788 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14789 // CHECK15-NEXT:  entry:
14790 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14791 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14792 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14793 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
14794 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
14795 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14796 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14797 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14798 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14799 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
14800 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14801 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14802 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14803 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14804 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
14805 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14806 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14807 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14808 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14809 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
14810 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
14811 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
14812 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
14813 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14814 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
14815 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14816 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
14817 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14818 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14819 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14820 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
14821 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14822 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
14823 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14824 // CHECK15:       omp.precond.then:
14825 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14826 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14827 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
14828 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14829 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14830 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14831 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
14832 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14833 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14834 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14835 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
14836 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14837 // CHECK15:       cond.true:
14838 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14839 // CHECK15-NEXT:    br label [[COND_END:%.*]]
14840 // CHECK15:       cond.false:
14841 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14842 // CHECK15-NEXT:    br label [[COND_END]]
14843 // CHECK15:       cond.end:
14844 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
14845 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14846 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14847 // CHECK15-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
14848 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14849 // CHECK15:       omp.inner.for.cond:
14850 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
14851 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
14852 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
14853 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14854 // CHECK15:       omp.inner.for.body:
14855 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
14856 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
14857 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !23
14858 // CHECK15-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !23
14859 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !23
14860 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !23
14861 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14862 // CHECK15:       omp.inner.for.inc:
14863 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
14864 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
14865 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
14866 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
14867 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
14868 // CHECK15:       omp.inner.for.end:
14869 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14870 // CHECK15:       omp.loop.exit:
14871 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14872 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
14873 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
14874 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14875 // CHECK15-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14876 // CHECK15-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14877 // CHECK15:       .omp.final.then:
14878 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14879 // CHECK15-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
14880 // CHECK15-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14881 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14882 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14883 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
14884 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14885 // CHECK15:       .omp.final.done:
14886 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
14887 // CHECK15:       omp.precond.end:
14888 // CHECK15-NEXT:    ret void
14889 //
14890 //
14891 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..3
14892 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14893 // CHECK15-NEXT:  entry:
14894 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14895 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14896 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14897 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14898 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14899 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
14900 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
14901 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14902 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14903 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14904 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14905 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
14906 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14907 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14908 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14909 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14910 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
14911 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14912 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14913 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14914 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14915 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14916 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
14917 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
14918 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
14919 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
14920 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14921 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
14922 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14923 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
14924 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14925 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14926 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14927 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
14928 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14929 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
14930 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14931 // CHECK15:       omp.precond.then:
14932 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14933 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14934 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
14935 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14936 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14937 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
14938 // CHECK15-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14939 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14940 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14941 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14942 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14943 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14944 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14945 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14946 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14947 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14948 // CHECK15:       cond.true:
14949 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14950 // CHECK15-NEXT:    br label [[COND_END:%.*]]
14951 // CHECK15:       cond.false:
14952 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14953 // CHECK15-NEXT:    br label [[COND_END]]
14954 // CHECK15:       cond.end:
14955 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14956 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14957 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14958 // CHECK15-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
14959 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14960 // CHECK15:       omp.inner.for.cond:
14961 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14962 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
14963 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14964 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14965 // CHECK15:       omp.inner.for.body:
14966 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14967 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
14968 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14969 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !26
14970 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !26
14971 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
14972 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
14973 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14974 // CHECK15:       omp.body.continue:
14975 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14976 // CHECK15:       omp.inner.for.inc:
14977 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14978 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
14979 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14980 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
14981 // CHECK15:       omp.inner.for.end:
14982 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14983 // CHECK15:       omp.loop.exit:
14984 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14985 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
14986 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
14987 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14988 // CHECK15-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
14989 // CHECK15-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14990 // CHECK15:       .omp.final.then:
14991 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14992 // CHECK15-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
14993 // CHECK15-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14994 // CHECK15-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
14995 // CHECK15-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
14996 // CHECK15-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
14997 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14998 // CHECK15:       .omp.final.done:
14999 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15000 // CHECK15:       omp.precond.end:
15001 // CHECK15-NEXT:    ret void
15002 //
15003 //
15004 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
15005 // CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15006 // CHECK15-NEXT:  entry:
15007 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15008 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15009 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15010 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15011 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15012 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15013 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15014 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15015 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15016 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15017 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15018 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15019 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15020 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
15021 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
15022 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15023 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
15024 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
15025 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
15026 // CHECK15-NEXT:    ret void
15027 //
15028 //
15029 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..5
15030 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15031 // CHECK15-NEXT:  entry:
15032 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15033 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15034 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15035 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15036 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15037 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15038 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15039 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15040 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15041 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15042 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15043 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15044 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15045 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15046 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15047 // CHECK15-NEXT:    [[I4:%.*]] = alloca i32, align 4
15048 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15049 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15050 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15051 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15052 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15053 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15054 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15055 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15056 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15057 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15058 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15059 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15060 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15061 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
15062 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15063 // CHECK15-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15064 // CHECK15-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15065 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15066 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15067 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
15068 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15069 // CHECK15:       omp.precond.then:
15070 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15071 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15072 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
15073 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15074 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15075 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15076 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15077 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
15078 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
15079 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15080 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15081 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
15082 // CHECK15-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15083 // CHECK15:       cond.true:
15084 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15085 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15086 // CHECK15:       cond.false:
15087 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15088 // CHECK15-NEXT:    br label [[COND_END]]
15089 // CHECK15:       cond.end:
15090 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
15091 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15092 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15093 // CHECK15-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
15094 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15095 // CHECK15:       omp.inner.for.cond:
15096 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
15097 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
15098 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
15099 // CHECK15-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
15100 // CHECK15-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15101 // CHECK15:       omp.inner.for.body:
15102 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
15103 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
15104 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !29
15105 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4, !llvm.access.group !29
15106 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !29
15107 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29
15108 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
15109 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
15110 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29
15111 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15112 // CHECK15:       omp.inner.for.inc:
15113 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
15114 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
15115 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
15116 // CHECK15-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
15117 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
15118 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
15119 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
15120 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
15121 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
15122 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
15123 // CHECK15-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
15124 // CHECK15-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
15125 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
15126 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
15127 // CHECK15-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
15128 // CHECK15-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
15129 // CHECK15:       cond.true11:
15130 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
15131 // CHECK15-NEXT:    br label [[COND_END13:%.*]]
15132 // CHECK15:       cond.false12:
15133 // CHECK15-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
15134 // CHECK15-NEXT:    br label [[COND_END13]]
15135 // CHECK15:       cond.end13:
15136 // CHECK15-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
15137 // CHECK15-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
15138 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
15139 // CHECK15-NEXT:    store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
15140 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
15141 // CHECK15:       omp.inner.for.end:
15142 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15143 // CHECK15:       omp.loop.exit:
15144 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15145 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
15146 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
15147 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15148 // CHECK15-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
15149 // CHECK15-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15150 // CHECK15:       .omp.final.then:
15151 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15152 // CHECK15-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP37]], 0
15153 // CHECK15-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
15154 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
15155 // CHECK15-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
15156 // CHECK15-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
15157 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15158 // CHECK15:       .omp.final.done:
15159 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15160 // CHECK15:       omp.precond.end:
15161 // CHECK15-NEXT:    ret void
15162 //
15163 //
15164 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6
15165 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15166 // CHECK15-NEXT:  entry:
15167 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15168 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15169 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15170 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15171 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15172 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15173 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15174 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15175 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15176 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15177 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15178 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15179 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15180 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15181 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15182 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15183 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15184 // CHECK15-NEXT:    [[I4:%.*]] = alloca i32, align 4
15185 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15186 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15187 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15188 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15189 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15190 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15191 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15192 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15193 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15194 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15195 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15196 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15197 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15198 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
15199 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15200 // CHECK15-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15201 // CHECK15-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15202 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15203 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15204 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
15205 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15206 // CHECK15:       omp.precond.then:
15207 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15208 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15209 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
15210 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15211 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15212 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
15213 // CHECK15-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15214 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15215 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15216 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15217 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
15218 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15219 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15220 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15221 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
15222 // CHECK15-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15223 // CHECK15:       cond.true:
15224 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15225 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15226 // CHECK15:       cond.false:
15227 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15228 // CHECK15-NEXT:    br label [[COND_END]]
15229 // CHECK15:       cond.end:
15230 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
15231 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
15232 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15233 // CHECK15-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
15234 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15235 // CHECK15:       omp.inner.for.cond:
15236 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
15237 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
15238 // CHECK15-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
15239 // CHECK15-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15240 // CHECK15:       omp.inner.for.body:
15241 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
15242 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
15243 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15244 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !32
15245 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !32
15246 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
15247 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
15248 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15249 // CHECK15:       omp.body.continue:
15250 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15251 // CHECK15:       omp.inner.for.inc:
15252 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
15253 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
15254 // CHECK15-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
15255 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
15256 // CHECK15:       omp.inner.for.end:
15257 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15258 // CHECK15:       omp.loop.exit:
15259 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15260 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
15261 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
15262 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15263 // CHECK15-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
15264 // CHECK15-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15265 // CHECK15:       .omp.final.then:
15266 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15267 // CHECK15-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP24]], 0
15268 // CHECK15-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
15269 // CHECK15-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
15270 // CHECK15-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
15271 // CHECK15-NEXT:    store i32 [[ADD11]], i32* [[I4]], align 4
15272 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15273 // CHECK15:       .omp.final.done:
15274 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15275 // CHECK15:       omp.precond.end:
15276 // CHECK15-NEXT:    ret void
15277 //
15278 //
15279 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
15280 // CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
15281 // CHECK15-NEXT:  entry:
15282 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15283 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15284 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15285 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15286 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15287 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15288 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15289 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15290 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15291 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15292 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
15293 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
15294 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
15295 // CHECK15-NEXT:    ret void
15296 //
15297 //
15298 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..8
15299 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
15300 // CHECK15-NEXT:  entry:
15301 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15302 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15303 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15304 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15305 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15306 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15307 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15308 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15309 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15310 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15311 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15312 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15313 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15314 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15315 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
15316 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15317 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15318 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15319 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15320 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15321 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15322 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15323 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15324 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15325 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
15326 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15327 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
15328 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15329 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15330 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15331 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15332 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15333 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
15334 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15335 // CHECK15:       omp.precond.then:
15336 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15337 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15338 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
15339 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15340 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15341 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15342 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
15343 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15344 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15345 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15346 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
15347 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15348 // CHECK15:       cond.true:
15349 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15350 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15351 // CHECK15:       cond.false:
15352 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15353 // CHECK15-NEXT:    br label [[COND_END]]
15354 // CHECK15:       cond.end:
15355 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
15356 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15357 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15358 // CHECK15-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
15359 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15360 // CHECK15:       omp.inner.for.cond:
15361 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
15362 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
15363 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
15364 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15365 // CHECK15:       omp.inner.for.body:
15366 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
15367 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
15368 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35
15369 // CHECK15-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35
15370 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35
15371 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35
15372 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15373 // CHECK15:       omp.inner.for.inc:
15374 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
15375 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
15376 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
15377 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
15378 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
15379 // CHECK15:       omp.inner.for.end:
15380 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15381 // CHECK15:       omp.loop.exit:
15382 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15383 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
15384 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
15385 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15386 // CHECK15-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
15387 // CHECK15-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15388 // CHECK15:       .omp.final.then:
15389 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15390 // CHECK15-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
15391 // CHECK15-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
15392 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
15393 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
15394 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
15395 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15396 // CHECK15:       .omp.final.done:
15397 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15398 // CHECK15:       omp.precond.end:
15399 // CHECK15-NEXT:    ret void
15400 //
15401 //
15402 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..9
15403 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
15404 // CHECK15-NEXT:  entry:
15405 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15406 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15407 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15408 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15409 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15410 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15411 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15412 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15413 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15414 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15415 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15416 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15417 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15418 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15419 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15420 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15421 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
15422 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15423 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15424 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15425 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15426 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15427 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15428 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15429 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15430 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15431 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15432 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
15433 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15434 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
15435 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15436 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15437 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15438 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15439 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15440 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
15441 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15442 // CHECK15:       omp.precond.then:
15443 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15444 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15445 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
15446 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15447 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15448 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
15449 // CHECK15-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15450 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15451 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15452 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15453 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15454 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15455 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
15456 // CHECK15-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
15457 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15458 // CHECK15:       omp.dispatch.cond:
15459 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15460 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
15461 // CHECK15-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
15462 // CHECK15-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
15463 // CHECK15-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15464 // CHECK15:       omp.dispatch.body:
15465 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15466 // CHECK15-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
15467 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15468 // CHECK15:       omp.inner.for.cond:
15469 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
15470 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
15471 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
15472 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15473 // CHECK15:       omp.inner.for.body:
15474 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
15475 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
15476 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15477 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !38
15478 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !38
15479 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
15480 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
15481 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15482 // CHECK15:       omp.body.continue:
15483 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15484 // CHECK15:       omp.inner.for.inc:
15485 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
15486 // CHECK15-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
15487 // CHECK15-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
15488 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
15489 // CHECK15:       omp.inner.for.end:
15490 // CHECK15-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15491 // CHECK15:       omp.dispatch.inc:
15492 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND]]
15493 // CHECK15:       omp.dispatch.end:
15494 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15495 // CHECK15-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
15496 // CHECK15-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15497 // CHECK15:       .omp.final.then:
15498 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15499 // CHECK15-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP23]], 0
15500 // CHECK15-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
15501 // CHECK15-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
15502 // CHECK15-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
15503 // CHECK15-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
15504 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15505 // CHECK15:       .omp.final.done:
15506 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15507 // CHECK15:       omp.precond.end:
15508 // CHECK15-NEXT:    ret void
15509 //
15510 //
15511 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
15512 // CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15513 // CHECK15-NEXT:  entry:
15514 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15515 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15516 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15517 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15518 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15519 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15520 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15521 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15522 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15523 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15524 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15525 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15526 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15527 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
15528 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
15529 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15530 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
15531 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
15532 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
15533 // CHECK15-NEXT:    ret void
15534 //
15535 //
15536 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11
15537 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15538 // CHECK15-NEXT:  entry:
15539 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15540 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15541 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15542 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15543 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15544 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15545 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15546 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15547 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15548 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15549 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15550 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15551 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15552 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15553 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15554 // CHECK15-NEXT:    [[I4:%.*]] = alloca i32, align 4
15555 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15556 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15557 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15558 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15559 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15560 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15561 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15562 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15563 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15564 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15565 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15566 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15567 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15568 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
15569 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15570 // CHECK15-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15571 // CHECK15-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15572 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15573 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15574 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
15575 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15576 // CHECK15:       omp.precond.then:
15577 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15578 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15579 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
15580 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15581 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15582 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15583 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
15584 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15585 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15586 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15587 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
15588 // CHECK15-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15589 // CHECK15:       cond.true:
15590 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15591 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15592 // CHECK15:       cond.false:
15593 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15594 // CHECK15-NEXT:    br label [[COND_END]]
15595 // CHECK15:       cond.end:
15596 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
15597 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15598 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15599 // CHECK15-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
15600 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15601 // CHECK15:       omp.inner.for.cond:
15602 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
15603 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
15604 // CHECK15-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
15605 // CHECK15-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15606 // CHECK15:       omp.inner.for.body:
15607 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !41
15608 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
15609 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !41
15610 // CHECK15-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !41
15611 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !41
15612 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41
15613 // CHECK15-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
15614 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
15615 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41
15616 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15617 // CHECK15:       omp.inner.for.inc:
15618 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
15619 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !41
15620 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
15621 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
15622 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
15623 // CHECK15:       omp.inner.for.end:
15624 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15625 // CHECK15:       omp.loop.exit:
15626 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15627 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
15628 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
15629 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15630 // CHECK15-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
15631 // CHECK15-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15632 // CHECK15:       .omp.final.then:
15633 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15634 // CHECK15-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
15635 // CHECK15-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15636 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
15637 // CHECK15-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
15638 // CHECK15-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
15639 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15640 // CHECK15:       .omp.final.done:
15641 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15642 // CHECK15:       omp.precond.end:
15643 // CHECK15-NEXT:    ret void
15644 //
15645 //
15646 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..12
15647 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15648 // CHECK15-NEXT:  entry:
15649 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15650 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15651 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15652 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15653 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15654 // CHECK15-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
15655 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
15656 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15657 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15658 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15659 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15660 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15661 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15662 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15663 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15664 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15665 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15666 // CHECK15-NEXT:    [[I4:%.*]] = alloca i32, align 4
15667 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15668 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15669 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15670 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15671 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15672 // CHECK15-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
15673 // CHECK15-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
15674 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15675 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
15676 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
15677 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15678 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15679 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15680 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
15681 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15682 // CHECK15-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15683 // CHECK15-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15684 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15685 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15686 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
15687 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15688 // CHECK15:       omp.precond.then:
15689 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15690 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15691 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
15692 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15693 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15694 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
15695 // CHECK15-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15696 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15697 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15698 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
15699 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15700 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15701 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15702 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
15703 // CHECK15-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
15704 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15705 // CHECK15:       omp.dispatch.cond:
15706 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15707 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
15708 // CHECK15-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
15709 // CHECK15-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
15710 // CHECK15-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15711 // CHECK15:       omp.dispatch.body:
15712 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15713 // CHECK15-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
15714 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15715 // CHECK15:       omp.inner.for.cond:
15716 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15717 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
15718 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
15719 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15720 // CHECK15:       omp.inner.for.body:
15721 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15722 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
15723 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15724 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !44
15725 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !44
15726 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
15727 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
15728 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15729 // CHECK15:       omp.body.continue:
15730 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15731 // CHECK15:       omp.inner.for.inc:
15732 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15733 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
15734 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15735 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
15736 // CHECK15:       omp.inner.for.end:
15737 // CHECK15-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15738 // CHECK15:       omp.dispatch.inc:
15739 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND]]
15740 // CHECK15:       omp.dispatch.end:
15741 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15742 // CHECK15-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
15743 // CHECK15-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15744 // CHECK15:       .omp.final.then:
15745 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15746 // CHECK15-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
15747 // CHECK15-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15748 // CHECK15-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
15749 // CHECK15-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
15750 // CHECK15-NEXT:    store i32 [[ADD10]], i32* [[I4]], align 4
15751 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15752 // CHECK15:       .omp.final.done:
15753 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15754 // CHECK15:       omp.precond.end:
15755 // CHECK15-NEXT:    ret void
15756 //
15757 //
15758 // CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
15759 // CHECK15-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
15760 // CHECK15-NEXT:  entry:
15761 // CHECK15-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
15762 // CHECK15-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
15763 // CHECK15-NEXT:    [[M:%.*]] = alloca i32, align 4
15764 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
15765 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
15766 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
15767 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15768 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
15769 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
15770 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
15771 // CHECK15-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
15772 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15773 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15774 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
15775 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
15776 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
15777 // CHECK15-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
15778 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
15779 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
15780 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
15781 // CHECK15-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
15782 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
15783 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
15784 // CHECK15-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
15785 // CHECK15-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
15786 // CHECK15-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
15787 // CHECK15-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
15788 // CHECK15-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
15789 // CHECK15-NEXT:    store i32 10, i32* [[M]], align 4
15790 // CHECK15-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15791 // CHECK15-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
15792 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
15793 // CHECK15-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15794 // CHECK15-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
15795 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
15796 // CHECK15-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
15797 // CHECK15-NEXT:    store i8* null, i8** [[TMP4]], align 4
15798 // CHECK15-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15799 // CHECK15-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15800 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
15801 // CHECK15-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
15802 // CHECK15-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
15803 // CHECK15-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
15804 // CHECK15:       omp_offload.failed:
15805 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
15806 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT]]
15807 // CHECK15:       omp_offload.cont:
15808 // CHECK15-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
15809 // CHECK15-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
15810 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
15811 // CHECK15-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
15812 // CHECK15-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
15813 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
15814 // CHECK15-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
15815 // CHECK15-NEXT:    store i8* null, i8** [[TMP13]], align 4
15816 // CHECK15-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
15817 // CHECK15-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
15818 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
15819 // CHECK15-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
15820 // CHECK15-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
15821 // CHECK15-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
15822 // CHECK15:       omp_offload.failed5:
15823 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
15824 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
15825 // CHECK15:       omp_offload.cont6:
15826 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
15827 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
15828 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15829 // CHECK15-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
15830 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
15831 // CHECK15-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
15832 // CHECK15-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
15833 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
15834 // CHECK15-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
15835 // CHECK15-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
15836 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
15837 // CHECK15-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
15838 // CHECK15-NEXT:    store i8* null, i8** [[TMP25]], align 4
15839 // CHECK15-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
15840 // CHECK15-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
15841 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[TMP27]], align 4
15842 // CHECK15-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
15843 // CHECK15-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
15844 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[TMP29]], align 4
15845 // CHECK15-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
15846 // CHECK15-NEXT:    store i8* null, i8** [[TMP30]], align 4
15847 // CHECK15-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
15848 // CHECK15-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
15849 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
15850 // CHECK15-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
15851 // CHECK15-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
15852 // CHECK15-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
15853 // CHECK15:       omp_offload.failed11:
15854 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
15855 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
15856 // CHECK15:       omp_offload.cont12:
15857 // CHECK15-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
15858 // CHECK15-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
15859 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
15860 // CHECK15-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
15861 // CHECK15-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
15862 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
15863 // CHECK15-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
15864 // CHECK15-NEXT:    store i8* null, i8** [[TMP39]], align 4
15865 // CHECK15-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
15866 // CHECK15-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
15867 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
15868 // CHECK15-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
15869 // CHECK15-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
15870 // CHECK15-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
15871 // CHECK15:       omp_offload.failed17:
15872 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
15873 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
15874 // CHECK15:       omp_offload.cont18:
15875 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
15876 // CHECK15-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
15877 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
15878 // CHECK15-NEXT:    store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
15879 // CHECK15-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
15880 // CHECK15-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
15881 // CHECK15-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
15882 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
15883 // CHECK15-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
15884 // CHECK15-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
15885 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
15886 // CHECK15-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
15887 // CHECK15-NEXT:    store i8* null, i8** [[TMP51]], align 4
15888 // CHECK15-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
15889 // CHECK15-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
15890 // CHECK15-NEXT:    store i32 [[TMP46]], i32* [[TMP53]], align 4
15891 // CHECK15-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
15892 // CHECK15-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
15893 // CHECK15-NEXT:    store i32 [[TMP46]], i32* [[TMP55]], align 4
15894 // CHECK15-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
15895 // CHECK15-NEXT:    store i8* null, i8** [[TMP56]], align 4
15896 // CHECK15-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
15897 // CHECK15-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
15898 // CHECK15-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
15899 // CHECK15-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
15900 // CHECK15-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
15901 // CHECK15-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
15902 // CHECK15:       omp_offload.failed25:
15903 // CHECK15-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
15904 // CHECK15-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
15905 // CHECK15:       omp_offload.cont26:
15906 // CHECK15-NEXT:    ret i32 0
15907 //
15908 //
15909 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
15910 // CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15911 // CHECK15-NEXT:  entry:
15912 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
15913 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
15914 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
15915 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
15916 // CHECK15-NEXT:    ret void
15917 //
15918 //
15919 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14
15920 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15921 // CHECK15-NEXT:  entry:
15922 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15923 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15924 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
15925 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15926 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15927 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15928 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15929 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15930 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15931 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15932 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15933 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15934 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
15935 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
15936 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15937 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
15938 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15939 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15940 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15941 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
15942 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15943 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15944 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15945 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15946 // CHECK15:       cond.true:
15947 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15948 // CHECK15:       cond.false:
15949 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15950 // CHECK15-NEXT:    br label [[COND_END]]
15951 // CHECK15:       cond.end:
15952 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15953 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15954 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15955 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
15956 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15957 // CHECK15:       omp.inner.for.cond:
15958 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15959 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
15960 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15961 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15962 // CHECK15:       omp.inner.for.body:
15963 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47
15964 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
15965 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47
15966 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15967 // CHECK15:       omp.inner.for.inc:
15968 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15969 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !47
15970 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
15971 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15972 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
15973 // CHECK15:       omp.inner.for.end:
15974 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15975 // CHECK15:       omp.loop.exit:
15976 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
15977 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15978 // CHECK15-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
15979 // CHECK15-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15980 // CHECK15:       .omp.final.then:
15981 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
15982 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15983 // CHECK15:       .omp.final.done:
15984 // CHECK15-NEXT:    ret void
15985 //
15986 //
15987 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15
15988 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15989 // CHECK15-NEXT:  entry:
15990 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15991 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15992 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15993 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15994 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
15995 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15996 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15997 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15998 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15999 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16000 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16001 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16002 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16003 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16004 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16005 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16006 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16007 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16008 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16009 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16010 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16011 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16012 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
16013 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
16014 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16015 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16016 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16017 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
16018 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16019 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16020 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
16021 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16022 // CHECK15:       cond.true:
16023 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16024 // CHECK15:       cond.false:
16025 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16026 // CHECK15-NEXT:    br label [[COND_END]]
16027 // CHECK15:       cond.end:
16028 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
16029 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16030 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16031 // CHECK15-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
16032 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16033 // CHECK15:       omp.inner.for.cond:
16034 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
16035 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !50
16036 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
16037 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16038 // CHECK15:       omp.inner.for.body:
16039 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
16040 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
16041 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16042 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !50
16043 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !50
16044 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
16045 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !50
16046 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16047 // CHECK15:       omp.body.continue:
16048 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16049 // CHECK15:       omp.inner.for.inc:
16050 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
16051 // CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
16052 // CHECK15-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
16053 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
16054 // CHECK15:       omp.inner.for.end:
16055 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16056 // CHECK15:       omp.loop.exit:
16057 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
16058 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16059 // CHECK15-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
16060 // CHECK15-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16061 // CHECK15:       .omp.final.then:
16062 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16063 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16064 // CHECK15:       .omp.final.done:
16065 // CHECK15-NEXT:    ret void
16066 //
16067 //
16068 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
16069 // CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
16070 // CHECK15-NEXT:  entry:
16071 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16072 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16073 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16074 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
16075 // CHECK15-NEXT:    ret void
16076 //
16077 //
16078 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..17
16079 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
16080 // CHECK15-NEXT:  entry:
16081 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16082 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16083 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16084 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16085 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16086 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16087 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16088 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16089 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16090 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16091 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16092 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16093 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16094 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16095 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16096 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
16097 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16098 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16099 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16100 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
16101 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16102 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16103 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
16104 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16105 // CHECK15:       cond.true:
16106 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16107 // CHECK15:       cond.false:
16108 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16109 // CHECK15-NEXT:    br label [[COND_END]]
16110 // CHECK15:       cond.end:
16111 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16112 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16113 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16114 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16115 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16116 // CHECK15:       omp.inner.for.cond:
16117 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
16118 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
16119 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
16120 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16121 // CHECK15:       omp.inner.for.body:
16122 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53
16123 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
16124 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53
16125 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16126 // CHECK15:       omp.inner.for.inc:
16127 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
16128 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !53
16129 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
16130 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
16131 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
16132 // CHECK15:       omp.inner.for.end:
16133 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16134 // CHECK15:       omp.loop.exit:
16135 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
16136 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16137 // CHECK15-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
16138 // CHECK15-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16139 // CHECK15:       .omp.final.then:
16140 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16141 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16142 // CHECK15:       .omp.final.done:
16143 // CHECK15-NEXT:    ret void
16144 //
16145 //
16146 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18
16147 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
16148 // CHECK15-NEXT:  entry:
16149 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16150 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16151 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16152 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16153 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16154 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16155 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16156 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16157 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16158 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16159 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16160 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16161 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16162 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16163 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16164 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16165 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16166 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16167 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16168 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16169 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16170 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16171 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
16172 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
16173 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16174 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16175 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16176 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
16177 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16178 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16179 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
16180 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16181 // CHECK15:       cond.true:
16182 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16183 // CHECK15:       cond.false:
16184 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16185 // CHECK15-NEXT:    br label [[COND_END]]
16186 // CHECK15:       cond.end:
16187 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
16188 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16189 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16190 // CHECK15-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
16191 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16192 // CHECK15:       omp.inner.for.cond:
16193 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
16194 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !56
16195 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
16196 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16197 // CHECK15:       omp.inner.for.body:
16198 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
16199 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
16200 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16201 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !56
16202 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !56
16203 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
16204 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !56
16205 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16206 // CHECK15:       omp.body.continue:
16207 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16208 // CHECK15:       omp.inner.for.inc:
16209 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
16210 // CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
16211 // CHECK15-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
16212 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
16213 // CHECK15:       omp.inner.for.end:
16214 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16215 // CHECK15:       omp.loop.exit:
16216 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
16217 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16218 // CHECK15-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
16219 // CHECK15-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16220 // CHECK15:       .omp.final.then:
16221 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16222 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16223 // CHECK15:       .omp.final.done:
16224 // CHECK15-NEXT:    ret void
16225 //
16226 //
16227 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
16228 // CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
16229 // CHECK15-NEXT:  entry:
16230 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16231 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16232 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
16233 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16234 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16235 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16236 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16237 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
16238 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
16239 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
16240 // CHECK15-NEXT:    ret void
16241 //
16242 //
16243 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..21
16244 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
16245 // CHECK15-NEXT:  entry:
16246 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16247 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16248 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16249 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16250 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16251 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16252 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16253 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16254 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16255 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16256 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16257 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
16258 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16259 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16260 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16261 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16262 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16263 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16264 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
16265 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16266 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16267 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16268 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
16269 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16270 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16271 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
16272 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16273 // CHECK15:       cond.true:
16274 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16275 // CHECK15:       cond.false:
16276 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16277 // CHECK15-NEXT:    br label [[COND_END]]
16278 // CHECK15:       cond.end:
16279 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16280 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16281 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16282 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16283 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16284 // CHECK15:       omp.inner.for.cond:
16285 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
16286 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
16287 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
16288 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16289 // CHECK15:       omp.inner.for.body:
16290 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !59
16291 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
16292 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59
16293 // CHECK15-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
16294 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
16295 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59
16296 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16297 // CHECK15:       omp.inner.for.inc:
16298 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
16299 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !59
16300 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
16301 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
16302 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
16303 // CHECK15:       omp.inner.for.end:
16304 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16305 // CHECK15:       omp.loop.exit:
16306 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
16307 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16308 // CHECK15-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
16309 // CHECK15-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16310 // CHECK15:       .omp.final.then:
16311 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16312 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16313 // CHECK15:       .omp.final.done:
16314 // CHECK15-NEXT:    ret void
16315 //
16316 //
16317 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22
16318 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
16319 // CHECK15-NEXT:  entry:
16320 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16321 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16322 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16323 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16324 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16325 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16326 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16327 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16328 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16329 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16330 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16331 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16332 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16333 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16334 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16335 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16336 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16337 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16338 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16339 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16340 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16341 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16342 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16343 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16344 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
16345 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
16346 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16347 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16348 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16349 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16350 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
16351 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
16352 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
16353 // CHECK15:       omp.dispatch.cond:
16354 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16355 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16356 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
16357 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16358 // CHECK15:       cond.true:
16359 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16360 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16361 // CHECK15:       cond.false:
16362 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16363 // CHECK15-NEXT:    br label [[COND_END]]
16364 // CHECK15:       cond.end:
16365 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
16366 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16367 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16368 // CHECK15-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
16369 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16370 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16371 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
16372 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
16373 // CHECK15:       omp.dispatch.body:
16374 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16375 // CHECK15:       omp.inner.for.cond:
16376 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16377 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !62
16378 // CHECK15-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
16379 // CHECK15-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16380 // CHECK15:       omp.inner.for.body:
16381 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16382 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
16383 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16384 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !62
16385 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !62
16386 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
16387 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !62
16388 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16389 // CHECK15:       omp.body.continue:
16390 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16391 // CHECK15:       omp.inner.for.inc:
16392 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16393 // CHECK15-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
16394 // CHECK15-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16395 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
16396 // CHECK15:       omp.inner.for.end:
16397 // CHECK15-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
16398 // CHECK15:       omp.dispatch.inc:
16399 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16400 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16401 // CHECK15-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
16402 // CHECK15-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
16403 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16404 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16405 // CHECK15-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
16406 // CHECK15-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
16407 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND]]
16408 // CHECK15:       omp.dispatch.end:
16409 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
16410 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16411 // CHECK15-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
16412 // CHECK15-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16413 // CHECK15:       .omp.final.then:
16414 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16415 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16416 // CHECK15:       .omp.final.done:
16417 // CHECK15-NEXT:    ret void
16418 //
16419 //
16420 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
16421 // CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
16422 // CHECK15-NEXT:  entry:
16423 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16424 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16425 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16426 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
16427 // CHECK15-NEXT:    ret void
16428 //
16429 //
16430 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..25
16431 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
16432 // CHECK15-NEXT:  entry:
16433 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16434 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16435 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16436 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16437 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16438 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16439 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16440 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16441 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16442 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16443 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16444 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16445 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16446 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16447 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16448 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
16449 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16450 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16451 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16452 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
16453 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16454 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16455 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
16456 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16457 // CHECK15:       cond.true:
16458 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16459 // CHECK15:       cond.false:
16460 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16461 // CHECK15-NEXT:    br label [[COND_END]]
16462 // CHECK15:       cond.end:
16463 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16464 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16465 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16466 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16467 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16468 // CHECK15:       omp.inner.for.cond:
16469 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16470 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
16471 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
16472 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16473 // CHECK15:       omp.inner.for.body:
16474 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65
16475 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
16476 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65
16477 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16478 // CHECK15:       omp.inner.for.inc:
16479 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16480 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !65
16481 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
16482 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16483 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
16484 // CHECK15:       omp.inner.for.end:
16485 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16486 // CHECK15:       omp.loop.exit:
16487 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
16488 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16489 // CHECK15-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
16490 // CHECK15-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16491 // CHECK15:       .omp.final.then:
16492 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16493 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16494 // CHECK15:       .omp.final.done:
16495 // CHECK15-NEXT:    ret void
16496 //
16497 //
16498 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26
16499 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
16500 // CHECK15-NEXT:  entry:
16501 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16502 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16503 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16504 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16505 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16506 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16507 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16508 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16509 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16510 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16511 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16512 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16513 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16514 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16515 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16516 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16517 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16518 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16519 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16520 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16521 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16522 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16523 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
16524 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
16525 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16526 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16527 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16528 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16529 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16530 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
16531 // CHECK15-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
16532 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
16533 // CHECK15:       omp.dispatch.cond:
16534 // CHECK15-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
16535 // CHECK15-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
16536 // CHECK15-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
16537 // CHECK15:       omp.dispatch.body:
16538 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16539 // CHECK15-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
16540 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16541 // CHECK15:       omp.inner.for.cond:
16542 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16543 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !68
16544 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
16545 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16546 // CHECK15:       omp.inner.for.body:
16547 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16548 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
16549 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16550 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !68
16551 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !68
16552 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
16553 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !68
16554 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16555 // CHECK15:       omp.body.continue:
16556 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16557 // CHECK15:       omp.inner.for.inc:
16558 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16559 // CHECK15-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
16560 // CHECK15-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16561 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
16562 // CHECK15:       omp.inner.for.end:
16563 // CHECK15-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
16564 // CHECK15:       omp.dispatch.inc:
16565 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND]]
16566 // CHECK15:       omp.dispatch.end:
16567 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16568 // CHECK15-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
16569 // CHECK15-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16570 // CHECK15:       .omp.final.then:
16571 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16572 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16573 // CHECK15:       .omp.final.done:
16574 // CHECK15-NEXT:    ret void
16575 //
16576 //
16577 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
16578 // CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
16579 // CHECK15-NEXT:  entry:
16580 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16581 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16582 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
16583 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16584 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16585 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16586 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16587 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
16588 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
16589 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
16590 // CHECK15-NEXT:    ret void
16591 //
16592 //
16593 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..29
16594 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
16595 // CHECK15-NEXT:  entry:
16596 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16597 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16598 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16599 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16600 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16601 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16602 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16603 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16604 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16605 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16606 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16607 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
16608 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16609 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16610 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16611 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16612 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16613 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16614 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
16615 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16616 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16617 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16618 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
16619 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16620 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16621 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
16622 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16623 // CHECK15:       cond.true:
16624 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16625 // CHECK15:       cond.false:
16626 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16627 // CHECK15-NEXT:    br label [[COND_END]]
16628 // CHECK15:       cond.end:
16629 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16630 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16631 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16632 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16633 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16634 // CHECK15:       omp.inner.for.cond:
16635 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16636 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
16637 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
16638 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16639 // CHECK15:       omp.inner.for.body:
16640 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !71
16641 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
16642 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71
16643 // CHECK15-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
16644 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
16645 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71
16646 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16647 // CHECK15:       omp.inner.for.inc:
16648 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16649 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !71
16650 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
16651 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16652 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
16653 // CHECK15:       omp.inner.for.end:
16654 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16655 // CHECK15:       omp.loop.exit:
16656 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
16657 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16658 // CHECK15-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
16659 // CHECK15-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16660 // CHECK15:       .omp.final.then:
16661 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16662 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16663 // CHECK15:       .omp.final.done:
16664 // CHECK15-NEXT:    ret void
16665 //
16666 //
16667 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30
16668 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
16669 // CHECK15-NEXT:  entry:
16670 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16671 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16672 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16673 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16674 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
16675 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16676 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16677 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16678 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16679 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16680 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16681 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16682 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16683 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16684 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16685 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16686 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16687 // CHECK15-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
16688 // CHECK15-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16689 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
16690 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16691 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16692 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16693 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16694 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
16695 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
16696 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16697 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16698 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
16699 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16700 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16701 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16702 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
16703 // CHECK15-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
16704 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
16705 // CHECK15:       omp.dispatch.cond:
16706 // CHECK15-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
16707 // CHECK15-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
16708 // CHECK15-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
16709 // CHECK15:       omp.dispatch.body:
16710 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16711 // CHECK15-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
16712 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16713 // CHECK15:       omp.inner.for.cond:
16714 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16715 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !74
16716 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
16717 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16718 // CHECK15:       omp.inner.for.body:
16719 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16720 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
16721 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16722 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !74
16723 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !74
16724 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
16725 // CHECK15-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !74
16726 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16727 // CHECK15:       omp.body.continue:
16728 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16729 // CHECK15:       omp.inner.for.inc:
16730 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16731 // CHECK15-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
16732 // CHECK15-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16733 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
16734 // CHECK15:       omp.inner.for.end:
16735 // CHECK15-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
16736 // CHECK15:       omp.dispatch.inc:
16737 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND]]
16738 // CHECK15:       omp.dispatch.end:
16739 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16740 // CHECK15-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
16741 // CHECK15-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16742 // CHECK15:       .omp.final.then:
16743 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
16744 // CHECK15-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16745 // CHECK15:       .omp.final.done:
16746 // CHECK15-NEXT:    ret void
16747 //
16748 //
16749 // CHECK15-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
16750 // CHECK15-SAME: () #[[ATTR5:[0-9]+]] {
16751 // CHECK15-NEXT:  entry:
16752 // CHECK15-NEXT:    call void @__tgt_register_requires(i64 1)
16753 // CHECK15-NEXT:    ret void
16754 //
16755 //
16756 // CHECK16-LABEL: define {{[^@]+}}@main
16757 // CHECK16-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
16758 // CHECK16-NEXT:  entry:
16759 // CHECK16-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
16760 // CHECK16-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
16761 // CHECK16-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 4
16762 // CHECK16-NEXT:    [[N:%.*]] = alloca i32, align 4
16763 // CHECK16-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
16764 // CHECK16-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
16765 // CHECK16-NEXT:    [[M:%.*]] = alloca i32, align 4
16766 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
16767 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
16768 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
16769 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
16770 // CHECK16-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
16771 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16772 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16773 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16774 // CHECK16-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
16775 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
16776 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
16777 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
16778 // CHECK16-NEXT:    [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
16779 // CHECK16-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
16780 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
16781 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
16782 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
16783 // CHECK16-NEXT:    [[N_CASTED18:%.*]] = alloca i32, align 4
16784 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
16785 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
16786 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
16787 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
16788 // CHECK16-NEXT:    [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
16789 // CHECK16-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
16790 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
16791 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
16792 // CHECK16-NEXT:    [[N_CASTED32:%.*]] = alloca i32, align 4
16793 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
16794 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
16795 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
16796 // CHECK16-NEXT:    [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
16797 // CHECK16-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
16798 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
16799 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
16800 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
16801 // CHECK16-NEXT:    [[N_CASTED47:%.*]] = alloca i32, align 4
16802 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
16803 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
16804 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
16805 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
16806 // CHECK16-NEXT:    [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
16807 // CHECK16-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
16808 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
16809 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
16810 // CHECK16-NEXT:    store i32 0, i32* [[RETVAL]], align 4
16811 // CHECK16-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
16812 // CHECK16-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
16813 // CHECK16-NEXT:    store i32 100, i32* [[N]], align 4
16814 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
16815 // CHECK16-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
16816 // CHECK16-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
16817 // CHECK16-NEXT:    [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
16818 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
16819 // CHECK16-NEXT:    store i32 10, i32* [[M]], align 4
16820 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N]], align 4
16821 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
16822 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
16823 // CHECK16-NEXT:    [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
16824 // CHECK16-NEXT:    [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
16825 // CHECK16-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
16826 // CHECK16-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
16827 // CHECK16-NEXT:    store i32 [[TMP3]], i32* [[TMP7]], align 4
16828 // CHECK16-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
16829 // CHECK16-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
16830 // CHECK16-NEXT:    store i32 [[TMP3]], i32* [[TMP9]], align 4
16831 // CHECK16-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
16832 // CHECK16-NEXT:    store i64 4, i64* [[TMP10]], align 4
16833 // CHECK16-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
16834 // CHECK16-NEXT:    store i8* null, i8** [[TMP11]], align 4
16835 // CHECK16-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
16836 // CHECK16-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
16837 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP13]], align 4
16838 // CHECK16-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
16839 // CHECK16-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
16840 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP15]], align 4
16841 // CHECK16-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
16842 // CHECK16-NEXT:    store i64 4, i64* [[TMP16]], align 4
16843 // CHECK16-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
16844 // CHECK16-NEXT:    store i8* null, i8** [[TMP17]], align 4
16845 // CHECK16-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
16846 // CHECK16-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
16847 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 4
16848 // CHECK16-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
16849 // CHECK16-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
16850 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 4
16851 // CHECK16-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
16852 // CHECK16-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 4
16853 // CHECK16-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
16854 // CHECK16-NEXT:    store i8* null, i8** [[TMP23]], align 4
16855 // CHECK16-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
16856 // CHECK16-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
16857 // CHECK16-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
16858 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
16859 // CHECK16-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
16860 // CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16861 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
16862 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16863 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16864 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16865 // CHECK16-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16866 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
16867 // CHECK16-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
16868 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
16869 // CHECK16-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
16870 // CHECK16-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
16871 // CHECK16-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
16872 // CHECK16:       omp_offload.failed:
16873 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
16874 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT]]
16875 // CHECK16:       omp_offload.cont:
16876 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
16877 // CHECK16-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
16878 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
16879 // CHECK16-NEXT:    [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
16880 // CHECK16-NEXT:    [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
16881 // CHECK16-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
16882 // CHECK16-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
16883 // CHECK16-NEXT:    store i32 [[TMP34]], i32* [[TMP38]], align 4
16884 // CHECK16-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
16885 // CHECK16-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
16886 // CHECK16-NEXT:    store i32 [[TMP34]], i32* [[TMP40]], align 4
16887 // CHECK16-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
16888 // CHECK16-NEXT:    store i64 4, i64* [[TMP41]], align 4
16889 // CHECK16-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
16890 // CHECK16-NEXT:    store i8* null, i8** [[TMP42]], align 4
16891 // CHECK16-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
16892 // CHECK16-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
16893 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP44]], align 4
16894 // CHECK16-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
16895 // CHECK16-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
16896 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP46]], align 4
16897 // CHECK16-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
16898 // CHECK16-NEXT:    store i64 4, i64* [[TMP47]], align 4
16899 // CHECK16-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
16900 // CHECK16-NEXT:    store i8* null, i8** [[TMP48]], align 4
16901 // CHECK16-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
16902 // CHECK16-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
16903 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP50]], align 4
16904 // CHECK16-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
16905 // CHECK16-NEXT:    [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
16906 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP52]], align 4
16907 // CHECK16-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
16908 // CHECK16-NEXT:    store i64 [[TMP36]], i64* [[TMP53]], align 4
16909 // CHECK16-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
16910 // CHECK16-NEXT:    store i8* null, i8** [[TMP54]], align 4
16911 // CHECK16-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
16912 // CHECK16-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
16913 // CHECK16-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
16914 // CHECK16-NEXT:    [[TMP58:%.*]] = load i32, i32* [[N]], align 4
16915 // CHECK16-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
16916 // CHECK16-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
16917 // CHECK16-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
16918 // CHECK16-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
16919 // CHECK16-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
16920 // CHECK16-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
16921 // CHECK16-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
16922 // CHECK16-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
16923 // CHECK16-NEXT:    [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
16924 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
16925 // CHECK16-NEXT:    [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
16926 // CHECK16-NEXT:    [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
16927 // CHECK16-NEXT:    br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
16928 // CHECK16:       omp_offload.failed15:
16929 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
16930 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
16931 // CHECK16:       omp_offload.cont16:
16932 // CHECK16-NEXT:    [[TMP64:%.*]] = load i32, i32* [[M]], align 4
16933 // CHECK16-NEXT:    store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
16934 // CHECK16-NEXT:    [[TMP65:%.*]] = load i32, i32* [[N]], align 4
16935 // CHECK16-NEXT:    store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
16936 // CHECK16-NEXT:    [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
16937 // CHECK16-NEXT:    [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
16938 // CHECK16-NEXT:    store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
16939 // CHECK16-NEXT:    [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
16940 // CHECK16-NEXT:    [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
16941 // CHECK16-NEXT:    [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
16942 // CHECK16-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
16943 // CHECK16-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
16944 // CHECK16-NEXT:    store i32 [[TMP66]], i32* [[TMP72]], align 4
16945 // CHECK16-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
16946 // CHECK16-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
16947 // CHECK16-NEXT:    store i32 [[TMP66]], i32* [[TMP74]], align 4
16948 // CHECK16-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
16949 // CHECK16-NEXT:    store i64 4, i64* [[TMP75]], align 4
16950 // CHECK16-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
16951 // CHECK16-NEXT:    store i8* null, i8** [[TMP76]], align 4
16952 // CHECK16-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
16953 // CHECK16-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
16954 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP78]], align 4
16955 // CHECK16-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
16956 // CHECK16-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
16957 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP80]], align 4
16958 // CHECK16-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
16959 // CHECK16-NEXT:    store i64 4, i64* [[TMP81]], align 4
16960 // CHECK16-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
16961 // CHECK16-NEXT:    store i8* null, i8** [[TMP82]], align 4
16962 // CHECK16-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
16963 // CHECK16-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
16964 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 4
16965 // CHECK16-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
16966 // CHECK16-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
16967 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP86]], align 4
16968 // CHECK16-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
16969 // CHECK16-NEXT:    store i64 [[TMP70]], i64* [[TMP87]], align 4
16970 // CHECK16-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
16971 // CHECK16-NEXT:    store i8* null, i8** [[TMP88]], align 4
16972 // CHECK16-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
16973 // CHECK16-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
16974 // CHECK16-NEXT:    store i32 [[TMP68]], i32* [[TMP90]], align 4
16975 // CHECK16-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
16976 // CHECK16-NEXT:    [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
16977 // CHECK16-NEXT:    store i32 [[TMP68]], i32* [[TMP92]], align 4
16978 // CHECK16-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
16979 // CHECK16-NEXT:    store i64 4, i64* [[TMP93]], align 4
16980 // CHECK16-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
16981 // CHECK16-NEXT:    store i8* null, i8** [[TMP94]], align 4
16982 // CHECK16-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
16983 // CHECK16-NEXT:    [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
16984 // CHECK16-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
16985 // CHECK16-NEXT:    [[TMP98:%.*]] = load i32, i32* [[N]], align 4
16986 // CHECK16-NEXT:    store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
16987 // CHECK16-NEXT:    [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
16988 // CHECK16-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
16989 // CHECK16-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
16990 // CHECK16-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
16991 // CHECK16-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
16992 // CHECK16-NEXT:    [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
16993 // CHECK16-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
16994 // CHECK16-NEXT:    [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
16995 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
16996 // CHECK16-NEXT:    [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
16997 // CHECK16-NEXT:    [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
16998 // CHECK16-NEXT:    br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
16999 // CHECK16:       omp_offload.failed30:
17000 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
17001 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
17002 // CHECK16:       omp_offload.cont31:
17003 // CHECK16-NEXT:    [[TMP104:%.*]] = load i32, i32* [[N]], align 4
17004 // CHECK16-NEXT:    store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
17005 // CHECK16-NEXT:    [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
17006 // CHECK16-NEXT:    [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
17007 // CHECK16-NEXT:    [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
17008 // CHECK16-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
17009 // CHECK16-NEXT:    [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
17010 // CHECK16-NEXT:    store i32 [[TMP105]], i32* [[TMP109]], align 4
17011 // CHECK16-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
17012 // CHECK16-NEXT:    [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
17013 // CHECK16-NEXT:    store i32 [[TMP105]], i32* [[TMP111]], align 4
17014 // CHECK16-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
17015 // CHECK16-NEXT:    store i64 4, i64* [[TMP112]], align 4
17016 // CHECK16-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
17017 // CHECK16-NEXT:    store i8* null, i8** [[TMP113]], align 4
17018 // CHECK16-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
17019 // CHECK16-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
17020 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP115]], align 4
17021 // CHECK16-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
17022 // CHECK16-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
17023 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP117]], align 4
17024 // CHECK16-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
17025 // CHECK16-NEXT:    store i64 4, i64* [[TMP118]], align 4
17026 // CHECK16-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
17027 // CHECK16-NEXT:    store i8* null, i8** [[TMP119]], align 4
17028 // CHECK16-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
17029 // CHECK16-NEXT:    [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
17030 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP121]], align 4
17031 // CHECK16-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
17032 // CHECK16-NEXT:    [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
17033 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP123]], align 4
17034 // CHECK16-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
17035 // CHECK16-NEXT:    store i64 [[TMP107]], i64* [[TMP124]], align 4
17036 // CHECK16-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
17037 // CHECK16-NEXT:    store i8* null, i8** [[TMP125]], align 4
17038 // CHECK16-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
17039 // CHECK16-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
17040 // CHECK16-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
17041 // CHECK16-NEXT:    [[TMP129:%.*]] = load i32, i32* [[N]], align 4
17042 // CHECK16-NEXT:    store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
17043 // CHECK16-NEXT:    [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
17044 // CHECK16-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
17045 // CHECK16-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
17046 // CHECK16-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
17047 // CHECK16-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
17048 // CHECK16-NEXT:    [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
17049 // CHECK16-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
17050 // CHECK16-NEXT:    [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
17051 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
17052 // CHECK16-NEXT:    [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
17053 // CHECK16-NEXT:    [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
17054 // CHECK16-NEXT:    br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
17055 // CHECK16:       omp_offload.failed44:
17056 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
17057 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
17058 // CHECK16:       omp_offload.cont45:
17059 // CHECK16-NEXT:    [[TMP135:%.*]] = load i32, i32* [[M]], align 4
17060 // CHECK16-NEXT:    store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
17061 // CHECK16-NEXT:    [[TMP136:%.*]] = load i32, i32* [[N]], align 4
17062 // CHECK16-NEXT:    store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
17063 // CHECK16-NEXT:    [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
17064 // CHECK16-NEXT:    [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
17065 // CHECK16-NEXT:    store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
17066 // CHECK16-NEXT:    [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
17067 // CHECK16-NEXT:    [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
17068 // CHECK16-NEXT:    [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
17069 // CHECK16-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
17070 // CHECK16-NEXT:    [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
17071 // CHECK16-NEXT:    store i32 [[TMP137]], i32* [[TMP143]], align 4
17072 // CHECK16-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
17073 // CHECK16-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
17074 // CHECK16-NEXT:    store i32 [[TMP137]], i32* [[TMP145]], align 4
17075 // CHECK16-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
17076 // CHECK16-NEXT:    store i64 4, i64* [[TMP146]], align 4
17077 // CHECK16-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
17078 // CHECK16-NEXT:    store i8* null, i8** [[TMP147]], align 4
17079 // CHECK16-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
17080 // CHECK16-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
17081 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP149]], align 4
17082 // CHECK16-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
17083 // CHECK16-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
17084 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[TMP151]], align 4
17085 // CHECK16-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
17086 // CHECK16-NEXT:    store i64 4, i64* [[TMP152]], align 4
17087 // CHECK16-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
17088 // CHECK16-NEXT:    store i8* null, i8** [[TMP153]], align 4
17089 // CHECK16-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
17090 // CHECK16-NEXT:    [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
17091 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP155]], align 4
17092 // CHECK16-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
17093 // CHECK16-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
17094 // CHECK16-NEXT:    store i32* [[VLA]], i32** [[TMP157]], align 4
17095 // CHECK16-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
17096 // CHECK16-NEXT:    store i64 [[TMP141]], i64* [[TMP158]], align 4
17097 // CHECK16-NEXT:    [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
17098 // CHECK16-NEXT:    store i8* null, i8** [[TMP159]], align 4
17099 // CHECK16-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
17100 // CHECK16-NEXT:    [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
17101 // CHECK16-NEXT:    store i32 [[TMP139]], i32* [[TMP161]], align 4
17102 // CHECK16-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
17103 // CHECK16-NEXT:    [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
17104 // CHECK16-NEXT:    store i32 [[TMP139]], i32* [[TMP163]], align 4
17105 // CHECK16-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
17106 // CHECK16-NEXT:    store i64 4, i64* [[TMP164]], align 4
17107 // CHECK16-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
17108 // CHECK16-NEXT:    store i8* null, i8** [[TMP165]], align 4
17109 // CHECK16-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
17110 // CHECK16-NEXT:    [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
17111 // CHECK16-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
17112 // CHECK16-NEXT:    [[TMP169:%.*]] = load i32, i32* [[N]], align 4
17113 // CHECK16-NEXT:    store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
17114 // CHECK16-NEXT:    [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
17115 // CHECK16-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
17116 // CHECK16-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
17117 // CHECK16-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
17118 // CHECK16-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
17119 // CHECK16-NEXT:    [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
17120 // CHECK16-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
17121 // CHECK16-NEXT:    [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
17122 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
17123 // CHECK16-NEXT:    [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
17124 // CHECK16-NEXT:    [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
17125 // CHECK16-NEXT:    br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
17126 // CHECK16:       omp_offload.failed60:
17127 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
17128 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
17129 // CHECK16:       omp_offload.cont61:
17130 // CHECK16-NEXT:    [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
17131 // CHECK16-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
17132 // CHECK16-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
17133 // CHECK16-NEXT:    [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
17134 // CHECK16-NEXT:    call void @llvm.stackrestore(i8* [[TMP176]])
17135 // CHECK16-NEXT:    [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
17136 // CHECK16-NEXT:    ret i32 [[TMP177]]
17137 //
17138 //
17139 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
17140 // CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
17141 // CHECK16-NEXT:  entry:
17142 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17143 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17144 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17145 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17146 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17147 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17148 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17149 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17150 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17151 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17152 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
17153 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
17154 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
17155 // CHECK16-NEXT:    ret void
17156 //
17157 //
17158 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined.
17159 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
17160 // CHECK16-NEXT:  entry:
17161 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17162 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17163 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17164 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17165 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17166 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17167 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17168 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17169 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17170 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17171 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17172 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17173 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17174 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17175 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17176 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17177 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17178 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17179 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17180 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17181 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17182 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17183 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17184 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17185 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
17186 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17187 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17188 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17189 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17190 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17191 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17192 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17193 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
17194 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17195 // CHECK16:       omp.precond.then:
17196 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17197 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17198 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
17199 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17200 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17201 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17202 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
17203 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17204 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17205 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17206 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
17207 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17208 // CHECK16:       cond.true:
17209 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17210 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17211 // CHECK16:       cond.false:
17212 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17213 // CHECK16-NEXT:    br label [[COND_END]]
17214 // CHECK16:       cond.end:
17215 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
17216 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17217 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17218 // CHECK16-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
17219 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17220 // CHECK16:       omp.inner.for.cond:
17221 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
17222 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
17223 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
17224 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17225 // CHECK16:       omp.inner.for.body:
17226 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14
17227 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
17228 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !14
17229 // CHECK16-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !14
17230 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !14
17231 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !14
17232 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17233 // CHECK16:       omp.inner.for.inc:
17234 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
17235 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14
17236 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
17237 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
17238 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
17239 // CHECK16:       omp.inner.for.end:
17240 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17241 // CHECK16:       omp.loop.exit:
17242 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17243 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
17244 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
17245 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17246 // CHECK16-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
17247 // CHECK16-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17248 // CHECK16:       .omp.final.then:
17249 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17250 // CHECK16-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
17251 // CHECK16-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
17252 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
17253 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
17254 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
17255 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17256 // CHECK16:       .omp.final.done:
17257 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17258 // CHECK16:       omp.precond.end:
17259 // CHECK16-NEXT:    ret void
17260 //
17261 //
17262 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..1
17263 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
17264 // CHECK16-NEXT:  entry:
17265 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17266 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17267 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
17268 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
17269 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17270 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17271 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17272 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17273 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17274 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17275 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17276 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17277 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17278 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17279 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17280 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17281 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17282 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17283 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17284 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17285 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17286 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17287 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17288 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17289 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17290 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17291 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17292 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
17293 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17294 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17295 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17296 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17297 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17298 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17299 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17300 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
17301 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17302 // CHECK16:       omp.precond.then:
17303 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17304 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17305 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
17306 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17307 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17308 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
17309 // CHECK16-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17310 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17311 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17312 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17313 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17314 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17315 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17316 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17317 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
17318 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17319 // CHECK16:       cond.true:
17320 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17321 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17322 // CHECK16:       cond.false:
17323 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17324 // CHECK16-NEXT:    br label [[COND_END]]
17325 // CHECK16:       cond.end:
17326 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
17327 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17328 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17329 // CHECK16-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
17330 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17331 // CHECK16:       omp.inner.for.cond:
17332 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
17333 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
17334 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17335 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17336 // CHECK16:       omp.inner.for.body:
17337 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
17338 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
17339 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17340 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !18
17341 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !18
17342 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
17343 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
17344 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17345 // CHECK16:       omp.body.continue:
17346 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17347 // CHECK16:       omp.inner.for.inc:
17348 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
17349 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
17350 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
17351 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
17352 // CHECK16:       omp.inner.for.end:
17353 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17354 // CHECK16:       omp.loop.exit:
17355 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17356 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
17357 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
17358 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17359 // CHECK16-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
17360 // CHECK16-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17361 // CHECK16:       .omp.final.then:
17362 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17363 // CHECK16-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
17364 // CHECK16-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
17365 // CHECK16-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
17366 // CHECK16-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
17367 // CHECK16-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
17368 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17369 // CHECK16:       .omp.final.done:
17370 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17371 // CHECK16:       omp.precond.end:
17372 // CHECK16-NEXT:    ret void
17373 //
17374 //
17375 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
17376 // CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
17377 // CHECK16-NEXT:  entry:
17378 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17379 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17380 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17381 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17382 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17383 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17384 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17385 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17386 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17387 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17388 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
17389 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
17390 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
17391 // CHECK16-NEXT:    ret void
17392 //
17393 //
17394 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..2
17395 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
17396 // CHECK16-NEXT:  entry:
17397 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17398 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17399 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17400 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17401 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17402 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17403 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17404 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17405 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17406 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17407 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17408 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17409 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17410 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17411 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17412 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17413 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17414 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17415 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17416 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17417 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17418 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17419 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17420 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17421 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
17422 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17423 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17424 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17425 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17426 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17427 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17428 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17429 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
17430 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17431 // CHECK16:       omp.precond.then:
17432 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17433 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17434 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
17435 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17436 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17437 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17438 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
17439 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17440 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17441 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17442 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
17443 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17444 // CHECK16:       cond.true:
17445 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17446 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17447 // CHECK16:       cond.false:
17448 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17449 // CHECK16-NEXT:    br label [[COND_END]]
17450 // CHECK16:       cond.end:
17451 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
17452 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17453 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17454 // CHECK16-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
17455 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17456 // CHECK16:       omp.inner.for.cond:
17457 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
17458 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
17459 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
17460 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17461 // CHECK16:       omp.inner.for.body:
17462 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
17463 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
17464 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !23
17465 // CHECK16-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !23
17466 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !23
17467 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !23
17468 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17469 // CHECK16:       omp.inner.for.inc:
17470 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
17471 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
17472 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
17473 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
17474 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
17475 // CHECK16:       omp.inner.for.end:
17476 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17477 // CHECK16:       omp.loop.exit:
17478 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17479 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
17480 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
17481 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17482 // CHECK16-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
17483 // CHECK16-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17484 // CHECK16:       .omp.final.then:
17485 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17486 // CHECK16-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
17487 // CHECK16-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
17488 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
17489 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
17490 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
17491 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17492 // CHECK16:       .omp.final.done:
17493 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17494 // CHECK16:       omp.precond.end:
17495 // CHECK16-NEXT:    ret void
17496 //
17497 //
17498 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..3
17499 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
17500 // CHECK16-NEXT:  entry:
17501 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17502 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17503 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
17504 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
17505 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17506 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17507 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17508 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17509 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17510 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17511 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17512 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17513 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17514 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17515 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17516 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17517 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17518 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17519 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17520 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17521 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17522 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17523 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17524 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17525 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17526 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17527 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17528 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
17529 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17530 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17531 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17532 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17533 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17534 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17535 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17536 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
17537 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17538 // CHECK16:       omp.precond.then:
17539 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17540 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17541 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
17542 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17543 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17544 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
17545 // CHECK16-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17546 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17547 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17548 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17549 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17550 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17551 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17552 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17553 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
17554 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17555 // CHECK16:       cond.true:
17556 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17557 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17558 // CHECK16:       cond.false:
17559 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17560 // CHECK16-NEXT:    br label [[COND_END]]
17561 // CHECK16:       cond.end:
17562 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
17563 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17564 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17565 // CHECK16-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
17566 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17567 // CHECK16:       omp.inner.for.cond:
17568 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
17569 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
17570 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17571 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17572 // CHECK16:       omp.inner.for.body:
17573 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
17574 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
17575 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17576 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !26
17577 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !26
17578 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
17579 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
17580 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17581 // CHECK16:       omp.body.continue:
17582 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17583 // CHECK16:       omp.inner.for.inc:
17584 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
17585 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
17586 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
17587 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
17588 // CHECK16:       omp.inner.for.end:
17589 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17590 // CHECK16:       omp.loop.exit:
17591 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17592 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
17593 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
17594 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17595 // CHECK16-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
17596 // CHECK16-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17597 // CHECK16:       .omp.final.then:
17598 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17599 // CHECK16-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
17600 // CHECK16-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
17601 // CHECK16-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
17602 // CHECK16-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
17603 // CHECK16-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
17604 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17605 // CHECK16:       .omp.final.done:
17606 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17607 // CHECK16:       omp.precond.end:
17608 // CHECK16-NEXT:    ret void
17609 //
17610 //
17611 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
17612 // CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
17613 // CHECK16-NEXT:  entry:
17614 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17615 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17616 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17617 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
17618 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17619 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
17620 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17621 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17622 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17623 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
17624 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17625 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17626 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17627 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
17628 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
17629 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
17630 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
17631 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
17632 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
17633 // CHECK16-NEXT:    ret void
17634 //
17635 //
17636 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..5
17637 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
17638 // CHECK16-NEXT:  entry:
17639 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17640 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17641 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17642 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17643 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17644 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
17645 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17646 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17647 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17648 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17649 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17650 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17651 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17652 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17653 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17654 // CHECK16-NEXT:    [[I4:%.*]] = alloca i32, align 4
17655 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17656 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
17657 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17658 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17659 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17660 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17661 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17662 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
17663 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17664 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17665 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17666 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17667 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17668 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17669 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17670 // CHECK16-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17671 // CHECK16-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17672 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17673 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17674 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
17675 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17676 // CHECK16:       omp.precond.then:
17677 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17678 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17679 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
17680 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17681 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17682 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
17683 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17684 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
17685 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
17686 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17687 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17688 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
17689 // CHECK16-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17690 // CHECK16:       cond.true:
17691 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17692 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17693 // CHECK16:       cond.false:
17694 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17695 // CHECK16-NEXT:    br label [[COND_END]]
17696 // CHECK16:       cond.end:
17697 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
17698 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17699 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17700 // CHECK16-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
17701 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17702 // CHECK16:       omp.inner.for.cond:
17703 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
17704 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
17705 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
17706 // CHECK16-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
17707 // CHECK16-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17708 // CHECK16:       omp.inner.for.body:
17709 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
17710 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
17711 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !29
17712 // CHECK16-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4, !llvm.access.group !29
17713 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !29
17714 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29
17715 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
17716 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
17717 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29
17718 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17719 // CHECK16:       omp.inner.for.inc:
17720 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
17721 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
17722 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
17723 // CHECK16-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
17724 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
17725 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
17726 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
17727 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
17728 // CHECK16-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
17729 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
17730 // CHECK16-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
17731 // CHECK16-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
17732 // CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
17733 // CHECK16-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
17734 // CHECK16-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
17735 // CHECK16-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
17736 // CHECK16:       cond.true11:
17737 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
17738 // CHECK16-NEXT:    br label [[COND_END13:%.*]]
17739 // CHECK16:       cond.false12:
17740 // CHECK16-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
17741 // CHECK16-NEXT:    br label [[COND_END13]]
17742 // CHECK16:       cond.end13:
17743 // CHECK16-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
17744 // CHECK16-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
17745 // CHECK16-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
17746 // CHECK16-NEXT:    store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
17747 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
17748 // CHECK16:       omp.inner.for.end:
17749 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17750 // CHECK16:       omp.loop.exit:
17751 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17752 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
17753 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
17754 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17755 // CHECK16-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
17756 // CHECK16-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17757 // CHECK16:       .omp.final.then:
17758 // CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17759 // CHECK16-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP37]], 0
17760 // CHECK16-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
17761 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
17762 // CHECK16-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
17763 // CHECK16-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
17764 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17765 // CHECK16:       .omp.final.done:
17766 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17767 // CHECK16:       omp.precond.end:
17768 // CHECK16-NEXT:    ret void
17769 //
17770 //
17771 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6
17772 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
17773 // CHECK16-NEXT:  entry:
17774 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17775 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17776 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
17777 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
17778 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17779 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17780 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17781 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
17782 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17783 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17784 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17785 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17786 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17787 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17788 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17789 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17790 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17791 // CHECK16-NEXT:    [[I4:%.*]] = alloca i32, align 4
17792 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17793 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17794 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17795 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17796 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17797 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17798 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17799 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
17800 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17801 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17802 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17803 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17804 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17805 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17806 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17807 // CHECK16-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17808 // CHECK16-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17809 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17810 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17811 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
17812 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17813 // CHECK16:       omp.precond.then:
17814 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17815 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17816 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
17817 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17818 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17819 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
17820 // CHECK16-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17821 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17822 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17823 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17824 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17825 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17826 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17827 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17828 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
17829 // CHECK16-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17830 // CHECK16:       cond.true:
17831 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17832 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17833 // CHECK16:       cond.false:
17834 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17835 // CHECK16-NEXT:    br label [[COND_END]]
17836 // CHECK16:       cond.end:
17837 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
17838 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17839 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17840 // CHECK16-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
17841 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17842 // CHECK16:       omp.inner.for.cond:
17843 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
17844 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
17845 // CHECK16-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17846 // CHECK16-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17847 // CHECK16:       omp.inner.for.body:
17848 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
17849 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
17850 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17851 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !32
17852 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !32
17853 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
17854 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
17855 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17856 // CHECK16:       omp.body.continue:
17857 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17858 // CHECK16:       omp.inner.for.inc:
17859 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
17860 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
17861 // CHECK16-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
17862 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
17863 // CHECK16:       omp.inner.for.end:
17864 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17865 // CHECK16:       omp.loop.exit:
17866 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17867 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
17868 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
17869 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17870 // CHECK16-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
17871 // CHECK16-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17872 // CHECK16:       .omp.final.then:
17873 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17874 // CHECK16-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP24]], 0
17875 // CHECK16-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
17876 // CHECK16-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
17877 // CHECK16-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
17878 // CHECK16-NEXT:    store i32 [[ADD11]], i32* [[I4]], align 4
17879 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17880 // CHECK16:       .omp.final.done:
17881 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17882 // CHECK16:       omp.precond.end:
17883 // CHECK16-NEXT:    ret void
17884 //
17885 //
17886 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
17887 // CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
17888 // CHECK16-NEXT:  entry:
17889 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17890 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17891 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17892 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17893 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17894 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17895 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17896 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17897 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17898 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17899 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
17900 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
17901 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
17902 // CHECK16-NEXT:    ret void
17903 //
17904 //
17905 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..8
17906 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
17907 // CHECK16-NEXT:  entry:
17908 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17909 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17910 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17911 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
17912 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
17913 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17914 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17915 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17916 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17917 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17918 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17919 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17920 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17921 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17922 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17923 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17924 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17925 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17926 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17927 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
17928 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
17929 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
17930 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
17931 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17932 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
17933 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17934 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17935 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17936 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17937 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17938 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17939 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17940 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
17941 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17942 // CHECK16:       omp.precond.then:
17943 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17944 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17945 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
17946 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17947 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17948 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17949 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
17950 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17951 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17952 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17953 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
17954 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17955 // CHECK16:       cond.true:
17956 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17957 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17958 // CHECK16:       cond.false:
17959 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17960 // CHECK16-NEXT:    br label [[COND_END]]
17961 // CHECK16:       cond.end:
17962 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
17963 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17964 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17965 // CHECK16-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
17966 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17967 // CHECK16:       omp.inner.for.cond:
17968 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
17969 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
17970 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
17971 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17972 // CHECK16:       omp.inner.for.body:
17973 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
17974 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
17975 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35
17976 // CHECK16-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35
17977 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35
17978 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35
17979 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17980 // CHECK16:       omp.inner.for.inc:
17981 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
17982 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
17983 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
17984 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
17985 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
17986 // CHECK16:       omp.inner.for.end:
17987 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17988 // CHECK16:       omp.loop.exit:
17989 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17990 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
17991 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
17992 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17993 // CHECK16-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
17994 // CHECK16-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17995 // CHECK16:       .omp.final.then:
17996 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17997 // CHECK16-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
17998 // CHECK16-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
17999 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
18000 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
18001 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
18002 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18003 // CHECK16:       .omp.final.done:
18004 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18005 // CHECK16:       omp.precond.end:
18006 // CHECK16-NEXT:    ret void
18007 //
18008 //
18009 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..9
18010 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
18011 // CHECK16-NEXT:  entry:
18012 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18013 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18014 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18015 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18016 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18017 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
18018 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
18019 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18020 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18021 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18022 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18023 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18024 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18025 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18026 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18027 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18028 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
18029 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18030 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18031 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18032 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18033 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18034 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
18035 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
18036 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
18037 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
18038 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18039 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
18040 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18041 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
18042 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18043 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18044 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18045 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
18046 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18047 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
18048 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18049 // CHECK16:       omp.precond.then:
18050 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18051 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18052 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
18053 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18054 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18055 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
18056 // CHECK16-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
18057 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18058 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18059 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18060 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18061 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18062 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
18063 // CHECK16-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
18064 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
18065 // CHECK16:       omp.dispatch.cond:
18066 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18067 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
18068 // CHECK16-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
18069 // CHECK16-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
18070 // CHECK16-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
18071 // CHECK16:       omp.dispatch.body:
18072 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18073 // CHECK16-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
18074 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18075 // CHECK16:       omp.inner.for.cond:
18076 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
18077 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
18078 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
18079 // CHECK16-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18080 // CHECK16:       omp.inner.for.body:
18081 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
18082 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
18083 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18084 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !38
18085 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !38
18086 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
18087 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
18088 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18089 // CHECK16:       omp.body.continue:
18090 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18091 // CHECK16:       omp.inner.for.inc:
18092 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
18093 // CHECK16-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
18094 // CHECK16-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
18095 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
18096 // CHECK16:       omp.inner.for.end:
18097 // CHECK16-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
18098 // CHECK16:       omp.dispatch.inc:
18099 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND]]
18100 // CHECK16:       omp.dispatch.end:
18101 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18102 // CHECK16-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
18103 // CHECK16-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18104 // CHECK16:       .omp.final.then:
18105 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18106 // CHECK16-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP23]], 0
18107 // CHECK16-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
18108 // CHECK16-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
18109 // CHECK16-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
18110 // CHECK16-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
18111 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18112 // CHECK16:       .omp.final.done:
18113 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18114 // CHECK16:       omp.precond.end:
18115 // CHECK16-NEXT:    ret void
18116 //
18117 //
18118 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
18119 // CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
18120 // CHECK16-NEXT:  entry:
18121 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18122 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
18123 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
18124 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
18125 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
18126 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
18127 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18128 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
18129 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
18130 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18131 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
18132 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
18133 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18134 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
18135 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
18136 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18137 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
18138 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
18139 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
18140 // CHECK16-NEXT:    ret void
18141 //
18142 //
18143 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11
18144 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
18145 // CHECK16-NEXT:  entry:
18146 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18147 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18148 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18149 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
18150 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
18151 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
18152 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18153 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18154 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18155 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
18156 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18157 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18158 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18159 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18160 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18161 // CHECK16-NEXT:    [[I4:%.*]] = alloca i32, align 4
18162 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
18163 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
18164 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18165 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18166 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18167 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
18168 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
18169 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18170 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
18171 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
18172 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18173 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18174 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18175 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
18176 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18177 // CHECK16-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
18178 // CHECK16-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
18179 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
18180 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18181 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
18182 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18183 // CHECK16:       omp.precond.then:
18184 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18185 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18186 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
18187 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18188 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18189 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18190 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
18191 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18192 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18193 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18194 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
18195 // CHECK16-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18196 // CHECK16:       cond.true:
18197 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18198 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18199 // CHECK16:       cond.false:
18200 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18201 // CHECK16-NEXT:    br label [[COND_END]]
18202 // CHECK16:       cond.end:
18203 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
18204 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18205 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18206 // CHECK16-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
18207 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18208 // CHECK16:       omp.inner.for.cond:
18209 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
18210 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
18211 // CHECK16-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
18212 // CHECK16-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18213 // CHECK16:       omp.inner.for.body:
18214 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !41
18215 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
18216 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !41
18217 // CHECK16-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !41
18218 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !41
18219 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41
18220 // CHECK16-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
18221 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
18222 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41
18223 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18224 // CHECK16:       omp.inner.for.inc:
18225 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
18226 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !41
18227 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
18228 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
18229 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
18230 // CHECK16:       omp.inner.for.end:
18231 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18232 // CHECK16:       omp.loop.exit:
18233 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18234 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
18235 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
18236 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18237 // CHECK16-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
18238 // CHECK16-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18239 // CHECK16:       .omp.final.then:
18240 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18241 // CHECK16-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
18242 // CHECK16-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
18243 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
18244 // CHECK16-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
18245 // CHECK16-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
18246 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18247 // CHECK16:       .omp.final.done:
18248 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18249 // CHECK16:       omp.precond.end:
18250 // CHECK16-NEXT:    ret void
18251 //
18252 //
18253 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..12
18254 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
18255 // CHECK16-NEXT:  entry:
18256 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18257 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18258 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18259 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18260 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18261 // CHECK16-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
18262 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
18263 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
18264 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18265 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18266 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18267 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
18268 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18269 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18270 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18271 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18272 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18273 // CHECK16-NEXT:    [[I4:%.*]] = alloca i32, align 4
18274 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18275 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18276 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18277 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18278 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18279 // CHECK16-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
18280 // CHECK16-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
18281 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18282 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
18283 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
18284 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18285 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18286 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18287 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
18288 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18289 // CHECK16-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
18290 // CHECK16-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
18291 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
18292 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18293 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
18294 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18295 // CHECK16:       omp.precond.then:
18296 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18297 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18298 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
18299 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18300 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18301 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
18302 // CHECK16-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
18303 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18304 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18305 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18306 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18307 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18308 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18309 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
18310 // CHECK16-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
18311 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
18312 // CHECK16:       omp.dispatch.cond:
18313 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18314 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
18315 // CHECK16-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
18316 // CHECK16-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
18317 // CHECK16-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
18318 // CHECK16:       omp.dispatch.body:
18319 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18320 // CHECK16-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
18321 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18322 // CHECK16:       omp.inner.for.cond:
18323 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
18324 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
18325 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
18326 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18327 // CHECK16:       omp.inner.for.body:
18328 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
18329 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
18330 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18331 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !44
18332 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !44
18333 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
18334 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
18335 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18336 // CHECK16:       omp.body.continue:
18337 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18338 // CHECK16:       omp.inner.for.inc:
18339 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
18340 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
18341 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
18342 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
18343 // CHECK16:       omp.inner.for.end:
18344 // CHECK16-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
18345 // CHECK16:       omp.dispatch.inc:
18346 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND]]
18347 // CHECK16:       omp.dispatch.end:
18348 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18349 // CHECK16-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
18350 // CHECK16-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18351 // CHECK16:       .omp.final.then:
18352 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18353 // CHECK16-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
18354 // CHECK16-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
18355 // CHECK16-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
18356 // CHECK16-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
18357 // CHECK16-NEXT:    store i32 [[ADD10]], i32* [[I4]], align 4
18358 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18359 // CHECK16:       .omp.final.done:
18360 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18361 // CHECK16:       omp.precond.end:
18362 // CHECK16-NEXT:    ret void
18363 //
18364 //
18365 // CHECK16-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
18366 // CHECK16-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
18367 // CHECK16-NEXT:  entry:
18368 // CHECK16-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
18369 // CHECK16-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
18370 // CHECK16-NEXT:    [[M:%.*]] = alloca i32, align 4
18371 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
18372 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
18373 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
18374 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18375 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
18376 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
18377 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
18378 // CHECK16-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
18379 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18380 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
18381 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
18382 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
18383 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
18384 // CHECK16-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
18385 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
18386 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
18387 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
18388 // CHECK16-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
18389 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
18390 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
18391 // CHECK16-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
18392 // CHECK16-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
18393 // CHECK16-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
18394 // CHECK16-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
18395 // CHECK16-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
18396 // CHECK16-NEXT:    store i32 10, i32* [[M]], align 4
18397 // CHECK16-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
18398 // CHECK16-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
18399 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
18400 // CHECK16-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
18401 // CHECK16-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
18402 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
18403 // CHECK16-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
18404 // CHECK16-NEXT:    store i8* null, i8** [[TMP4]], align 4
18405 // CHECK16-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
18406 // CHECK16-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
18407 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
18408 // CHECK16-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
18409 // CHECK16-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
18410 // CHECK16-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
18411 // CHECK16:       omp_offload.failed:
18412 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
18413 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT]]
18414 // CHECK16:       omp_offload.cont:
18415 // CHECK16-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
18416 // CHECK16-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
18417 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
18418 // CHECK16-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
18419 // CHECK16-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
18420 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
18421 // CHECK16-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
18422 // CHECK16-NEXT:    store i8* null, i8** [[TMP13]], align 4
18423 // CHECK16-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
18424 // CHECK16-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
18425 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
18426 // CHECK16-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
18427 // CHECK16-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
18428 // CHECK16-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
18429 // CHECK16:       omp_offload.failed5:
18430 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
18431 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
18432 // CHECK16:       omp_offload.cont6:
18433 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
18434 // CHECK16-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
18435 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18436 // CHECK16-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
18437 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
18438 // CHECK16-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
18439 // CHECK16-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
18440 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
18441 // CHECK16-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
18442 // CHECK16-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
18443 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
18444 // CHECK16-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
18445 // CHECK16-NEXT:    store i8* null, i8** [[TMP25]], align 4
18446 // CHECK16-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
18447 // CHECK16-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
18448 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[TMP27]], align 4
18449 // CHECK16-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
18450 // CHECK16-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
18451 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[TMP29]], align 4
18452 // CHECK16-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
18453 // CHECK16-NEXT:    store i8* null, i8** [[TMP30]], align 4
18454 // CHECK16-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
18455 // CHECK16-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
18456 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
18457 // CHECK16-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
18458 // CHECK16-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
18459 // CHECK16-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
18460 // CHECK16:       omp_offload.failed11:
18461 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
18462 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
18463 // CHECK16:       omp_offload.cont12:
18464 // CHECK16-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
18465 // CHECK16-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
18466 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
18467 // CHECK16-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
18468 // CHECK16-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
18469 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
18470 // CHECK16-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
18471 // CHECK16-NEXT:    store i8* null, i8** [[TMP39]], align 4
18472 // CHECK16-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
18473 // CHECK16-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
18474 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
18475 // CHECK16-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
18476 // CHECK16-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
18477 // CHECK16-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
18478 // CHECK16:       omp_offload.failed17:
18479 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
18480 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
18481 // CHECK16:       omp_offload.cont18:
18482 // CHECK16-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
18483 // CHECK16-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
18484 // CHECK16-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
18485 // CHECK16-NEXT:    store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
18486 // CHECK16-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
18487 // CHECK16-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
18488 // CHECK16-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
18489 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
18490 // CHECK16-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
18491 // CHECK16-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
18492 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
18493 // CHECK16-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
18494 // CHECK16-NEXT:    store i8* null, i8** [[TMP51]], align 4
18495 // CHECK16-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
18496 // CHECK16-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
18497 // CHECK16-NEXT:    store i32 [[TMP46]], i32* [[TMP53]], align 4
18498 // CHECK16-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
18499 // CHECK16-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
18500 // CHECK16-NEXT:    store i32 [[TMP46]], i32* [[TMP55]], align 4
18501 // CHECK16-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
18502 // CHECK16-NEXT:    store i8* null, i8** [[TMP56]], align 4
18503 // CHECK16-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
18504 // CHECK16-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
18505 // CHECK16-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
18506 // CHECK16-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
18507 // CHECK16-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
18508 // CHECK16-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
18509 // CHECK16:       omp_offload.failed25:
18510 // CHECK16-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
18511 // CHECK16-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
18512 // CHECK16:       omp_offload.cont26:
18513 // CHECK16-NEXT:    ret i32 0
18514 //
18515 //
18516 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
18517 // CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
18518 // CHECK16-NEXT:  entry:
18519 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18520 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18521 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18522 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
18523 // CHECK16-NEXT:    ret void
18524 //
18525 //
18526 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14
18527 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
18528 // CHECK16-NEXT:  entry:
18529 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18530 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18531 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18532 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18533 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18534 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18535 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18536 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18537 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18538 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18539 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18540 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18541 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18542 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18543 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18544 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
18545 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18546 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18547 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18548 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
18549 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18550 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18551 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
18552 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18553 // CHECK16:       cond.true:
18554 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18555 // CHECK16:       cond.false:
18556 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18557 // CHECK16-NEXT:    br label [[COND_END]]
18558 // CHECK16:       cond.end:
18559 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
18560 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18561 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18562 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
18563 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18564 // CHECK16:       omp.inner.for.cond:
18565 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
18566 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
18567 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
18568 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18569 // CHECK16:       omp.inner.for.body:
18570 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47
18571 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
18572 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47
18573 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18574 // CHECK16:       omp.inner.for.inc:
18575 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
18576 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !47
18577 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
18578 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
18579 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
18580 // CHECK16:       omp.inner.for.end:
18581 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18582 // CHECK16:       omp.loop.exit:
18583 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
18584 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18585 // CHECK16-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
18586 // CHECK16-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18587 // CHECK16:       .omp.final.then:
18588 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18589 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18590 // CHECK16:       .omp.final.done:
18591 // CHECK16-NEXT:    ret void
18592 //
18593 //
18594 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15
18595 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
18596 // CHECK16-NEXT:  entry:
18597 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18598 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18599 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18600 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18601 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18602 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18603 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18604 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18605 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18606 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18607 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18608 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18609 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18610 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18611 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18612 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18613 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18614 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18615 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18616 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18617 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18618 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18619 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
18620 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
18621 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18622 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18623 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18624 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
18625 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18626 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18627 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
18628 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18629 // CHECK16:       cond.true:
18630 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18631 // CHECK16:       cond.false:
18632 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18633 // CHECK16-NEXT:    br label [[COND_END]]
18634 // CHECK16:       cond.end:
18635 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
18636 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18637 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18638 // CHECK16-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
18639 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18640 // CHECK16:       omp.inner.for.cond:
18641 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
18642 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !50
18643 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
18644 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18645 // CHECK16:       omp.inner.for.body:
18646 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
18647 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
18648 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18649 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !50
18650 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !50
18651 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
18652 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !50
18653 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18654 // CHECK16:       omp.body.continue:
18655 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18656 // CHECK16:       omp.inner.for.inc:
18657 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
18658 // CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
18659 // CHECK16-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
18660 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
18661 // CHECK16:       omp.inner.for.end:
18662 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18663 // CHECK16:       omp.loop.exit:
18664 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
18665 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18666 // CHECK16-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
18667 // CHECK16-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18668 // CHECK16:       .omp.final.then:
18669 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18670 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18671 // CHECK16:       .omp.final.done:
18672 // CHECK16-NEXT:    ret void
18673 //
18674 //
18675 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
18676 // CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
18677 // CHECK16-NEXT:  entry:
18678 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18679 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18680 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18681 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
18682 // CHECK16-NEXT:    ret void
18683 //
18684 //
18685 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..17
18686 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
18687 // CHECK16-NEXT:  entry:
18688 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18689 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18690 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18691 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18692 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18693 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18694 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18695 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18696 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18697 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18698 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18699 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18700 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18701 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18702 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18703 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
18704 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18705 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18706 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18707 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
18708 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18709 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18710 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
18711 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18712 // CHECK16:       cond.true:
18713 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18714 // CHECK16:       cond.false:
18715 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18716 // CHECK16-NEXT:    br label [[COND_END]]
18717 // CHECK16:       cond.end:
18718 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
18719 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18720 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18721 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
18722 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18723 // CHECK16:       omp.inner.for.cond:
18724 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
18725 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
18726 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
18727 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18728 // CHECK16:       omp.inner.for.body:
18729 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53
18730 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
18731 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53
18732 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18733 // CHECK16:       omp.inner.for.inc:
18734 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
18735 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !53
18736 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
18737 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
18738 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
18739 // CHECK16:       omp.inner.for.end:
18740 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18741 // CHECK16:       omp.loop.exit:
18742 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
18743 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18744 // CHECK16-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
18745 // CHECK16-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18746 // CHECK16:       .omp.final.then:
18747 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18748 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18749 // CHECK16:       .omp.final.done:
18750 // CHECK16-NEXT:    ret void
18751 //
18752 //
18753 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18
18754 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
18755 // CHECK16-NEXT:  entry:
18756 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18757 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18758 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18759 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18760 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18761 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18762 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18763 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18764 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18765 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18766 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18767 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18768 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18769 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18770 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18771 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18772 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18773 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18774 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18775 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18776 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18777 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18778 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
18779 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
18780 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18781 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18782 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18783 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
18784 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18785 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18786 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
18787 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18788 // CHECK16:       cond.true:
18789 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18790 // CHECK16:       cond.false:
18791 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18792 // CHECK16-NEXT:    br label [[COND_END]]
18793 // CHECK16:       cond.end:
18794 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
18795 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18796 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18797 // CHECK16-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
18798 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18799 // CHECK16:       omp.inner.for.cond:
18800 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
18801 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !56
18802 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
18803 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18804 // CHECK16:       omp.inner.for.body:
18805 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
18806 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
18807 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18808 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !56
18809 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !56
18810 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
18811 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !56
18812 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18813 // CHECK16:       omp.body.continue:
18814 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18815 // CHECK16:       omp.inner.for.inc:
18816 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
18817 // CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
18818 // CHECK16-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
18819 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
18820 // CHECK16:       omp.inner.for.end:
18821 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18822 // CHECK16:       omp.loop.exit:
18823 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
18824 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18825 // CHECK16-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
18826 // CHECK16-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18827 // CHECK16:       .omp.final.then:
18828 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18829 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18830 // CHECK16:       .omp.final.done:
18831 // CHECK16-NEXT:    ret void
18832 //
18833 //
18834 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
18835 // CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
18836 // CHECK16-NEXT:  entry:
18837 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18838 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
18839 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
18840 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18841 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18842 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18843 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18844 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
18845 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
18846 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
18847 // CHECK16-NEXT:    ret void
18848 //
18849 //
18850 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..21
18851 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
18852 // CHECK16-NEXT:  entry:
18853 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18854 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18855 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18856 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
18857 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18858 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18859 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18860 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18861 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18862 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18863 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18864 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
18865 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18866 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18867 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18868 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18869 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18870 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18871 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
18872 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18873 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18874 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18875 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
18876 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18877 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18878 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
18879 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18880 // CHECK16:       cond.true:
18881 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18882 // CHECK16:       cond.false:
18883 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18884 // CHECK16-NEXT:    br label [[COND_END]]
18885 // CHECK16:       cond.end:
18886 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
18887 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18888 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18889 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
18890 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18891 // CHECK16:       omp.inner.for.cond:
18892 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
18893 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
18894 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
18895 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18896 // CHECK16:       omp.inner.for.body:
18897 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !59
18898 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
18899 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59
18900 // CHECK16-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
18901 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
18902 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59
18903 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18904 // CHECK16:       omp.inner.for.inc:
18905 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
18906 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !59
18907 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
18908 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
18909 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
18910 // CHECK16:       omp.inner.for.end:
18911 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18912 // CHECK16:       omp.loop.exit:
18913 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
18914 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18915 // CHECK16-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
18916 // CHECK16-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18917 // CHECK16:       .omp.final.then:
18918 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18919 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18920 // CHECK16:       .omp.final.done:
18921 // CHECK16-NEXT:    ret void
18922 //
18923 //
18924 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22
18925 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
18926 // CHECK16-NEXT:  entry:
18927 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18928 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18929 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18930 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18931 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
18932 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
18933 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18934 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18935 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18936 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18937 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18938 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18939 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18940 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18941 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18942 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18943 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18944 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
18945 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18946 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
18947 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18948 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18949 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18950 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18951 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
18952 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
18953 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18954 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18955 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
18956 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18957 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
18958 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
18959 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
18960 // CHECK16:       omp.dispatch.cond:
18961 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18962 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18963 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
18964 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18965 // CHECK16:       cond.true:
18966 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18967 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18968 // CHECK16:       cond.false:
18969 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18970 // CHECK16-NEXT:    br label [[COND_END]]
18971 // CHECK16:       cond.end:
18972 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
18973 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18974 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18975 // CHECK16-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
18976 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18977 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18978 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
18979 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
18980 // CHECK16:       omp.dispatch.body:
18981 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18982 // CHECK16:       omp.inner.for.cond:
18983 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
18984 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !62
18985 // CHECK16-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
18986 // CHECK16-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18987 // CHECK16:       omp.inner.for.body:
18988 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
18989 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
18990 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18991 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !62
18992 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !62
18993 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
18994 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !62
18995 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18996 // CHECK16:       omp.body.continue:
18997 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18998 // CHECK16:       omp.inner.for.inc:
18999 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
19000 // CHECK16-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
19001 // CHECK16-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
19002 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
19003 // CHECK16:       omp.inner.for.end:
19004 // CHECK16-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
19005 // CHECK16:       omp.dispatch.inc:
19006 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19007 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19008 // CHECK16-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
19009 // CHECK16-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
19010 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19011 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19012 // CHECK16-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
19013 // CHECK16-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
19014 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND]]
19015 // CHECK16:       omp.dispatch.end:
19016 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
19017 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19018 // CHECK16-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
19019 // CHECK16-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19020 // CHECK16:       .omp.final.then:
19021 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
19022 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19023 // CHECK16:       .omp.final.done:
19024 // CHECK16-NEXT:    ret void
19025 //
19026 //
19027 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
19028 // CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
19029 // CHECK16-NEXT:  entry:
19030 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
19031 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
19032 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
19033 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
19034 // CHECK16-NEXT:    ret void
19035 //
19036 //
19037 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..25
19038 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
19039 // CHECK16-NEXT:  entry:
19040 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19041 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19042 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
19043 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19044 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19045 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19046 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19047 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19048 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19049 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
19050 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19051 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19052 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
19053 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
19054 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19055 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
19056 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19057 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19058 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19059 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
19060 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19061 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19062 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
19063 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19064 // CHECK16:       cond.true:
19065 // CHECK16-NEXT:    br label [[COND_END:%.*]]
19066 // CHECK16:       cond.false:
19067 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19068 // CHECK16-NEXT:    br label [[COND_END]]
19069 // CHECK16:       cond.end:
19070 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
19071 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19072 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19073 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
19074 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19075 // CHECK16:       omp.inner.for.cond:
19076 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
19077 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
19078 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
19079 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19080 // CHECK16:       omp.inner.for.body:
19081 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65
19082 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
19083 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65
19084 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19085 // CHECK16:       omp.inner.for.inc:
19086 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
19087 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !65
19088 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
19089 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
19090 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
19091 // CHECK16:       omp.inner.for.end:
19092 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19093 // CHECK16:       omp.loop.exit:
19094 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
19095 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19096 // CHECK16-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
19097 // CHECK16-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19098 // CHECK16:       .omp.final.then:
19099 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
19100 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19101 // CHECK16:       .omp.final.done:
19102 // CHECK16-NEXT:    ret void
19103 //
19104 //
19105 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26
19106 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
19107 // CHECK16-NEXT:  entry:
19108 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19109 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19110 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19111 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19112 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
19113 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19114 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19115 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19116 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19117 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19118 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19119 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
19120 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19121 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19122 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19123 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19124 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
19125 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
19126 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19127 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19128 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19129 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19130 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
19131 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
19132 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19133 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19134 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19135 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19136 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19137 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
19138 // CHECK16-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
19139 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
19140 // CHECK16:       omp.dispatch.cond:
19141 // CHECK16-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
19142 // CHECK16-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
19143 // CHECK16-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
19144 // CHECK16:       omp.dispatch.body:
19145 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19146 // CHECK16-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
19147 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19148 // CHECK16:       omp.inner.for.cond:
19149 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
19150 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !68
19151 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
19152 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19153 // CHECK16:       omp.inner.for.body:
19154 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
19155 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
19156 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19157 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !68
19158 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !68
19159 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
19160 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !68
19161 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19162 // CHECK16:       omp.body.continue:
19163 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19164 // CHECK16:       omp.inner.for.inc:
19165 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
19166 // CHECK16-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
19167 // CHECK16-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
19168 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
19169 // CHECK16:       omp.inner.for.end:
19170 // CHECK16-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
19171 // CHECK16:       omp.dispatch.inc:
19172 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND]]
19173 // CHECK16:       omp.dispatch.end:
19174 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19175 // CHECK16-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
19176 // CHECK16-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19177 // CHECK16:       .omp.final.then:
19178 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
19179 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19180 // CHECK16:       .omp.final.done:
19181 // CHECK16-NEXT:    ret void
19182 //
19183 //
19184 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
19185 // CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
19186 // CHECK16-NEXT:  entry:
19187 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
19188 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19189 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
19190 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
19191 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19192 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
19193 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19194 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
19195 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
19196 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
19197 // CHECK16-NEXT:    ret void
19198 //
19199 //
19200 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..29
19201 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
19202 // CHECK16-NEXT:  entry:
19203 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19204 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19205 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
19206 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19207 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19208 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19209 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19210 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19211 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19212 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19213 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
19214 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
19215 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19216 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19217 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
19218 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19219 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
19220 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19221 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
19222 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19223 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19224 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19225 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
19226 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19227 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19228 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
19229 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19230 // CHECK16:       cond.true:
19231 // CHECK16-NEXT:    br label [[COND_END:%.*]]
19232 // CHECK16:       cond.false:
19233 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19234 // CHECK16-NEXT:    br label [[COND_END]]
19235 // CHECK16:       cond.end:
19236 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
19237 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19238 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19239 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
19240 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19241 // CHECK16:       omp.inner.for.cond:
19242 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
19243 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
19244 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
19245 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19246 // CHECK16:       omp.inner.for.body:
19247 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !71
19248 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
19249 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71
19250 // CHECK16-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
19251 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
19252 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71
19253 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19254 // CHECK16:       omp.inner.for.inc:
19255 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
19256 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !71
19257 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
19258 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
19259 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
19260 // CHECK16:       omp.inner.for.end:
19261 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19262 // CHECK16:       omp.loop.exit:
19263 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
19264 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19265 // CHECK16-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
19266 // CHECK16-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19267 // CHECK16:       .omp.final.then:
19268 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
19269 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19270 // CHECK16:       .omp.final.done:
19271 // CHECK16-NEXT:    ret void
19272 //
19273 //
19274 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30
19275 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
19276 // CHECK16-NEXT:  entry:
19277 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19278 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19279 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19280 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19281 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
19282 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19283 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19284 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19285 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19286 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19287 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19288 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19289 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
19290 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19291 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19292 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19293 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19294 // CHECK16-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
19295 // CHECK16-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19296 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
19297 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19298 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19299 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19300 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19301 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
19302 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
19303 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19304 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19305 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19306 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19307 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19308 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19309 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
19310 // CHECK16-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
19311 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
19312 // CHECK16:       omp.dispatch.cond:
19313 // CHECK16-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
19314 // CHECK16-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
19315 // CHECK16-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
19316 // CHECK16:       omp.dispatch.body:
19317 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19318 // CHECK16-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
19319 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19320 // CHECK16:       omp.inner.for.cond:
19321 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
19322 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !74
19323 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
19324 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19325 // CHECK16:       omp.inner.for.body:
19326 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
19327 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
19328 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19329 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !74
19330 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !74
19331 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
19332 // CHECK16-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !74
19333 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19334 // CHECK16:       omp.body.continue:
19335 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19336 // CHECK16:       omp.inner.for.inc:
19337 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
19338 // CHECK16-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
19339 // CHECK16-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
19340 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
19341 // CHECK16:       omp.inner.for.end:
19342 // CHECK16-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
19343 // CHECK16:       omp.dispatch.inc:
19344 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND]]
19345 // CHECK16:       omp.dispatch.end:
19346 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19347 // CHECK16-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
19348 // CHECK16-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19349 // CHECK16:       .omp.final.then:
19350 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
19351 // CHECK16-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19352 // CHECK16:       .omp.final.done:
19353 // CHECK16-NEXT:    ret void
19354 //
19355 //
19356 // CHECK16-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
19357 // CHECK16-SAME: () #[[ATTR5:[0-9]+]] {
19358 // CHECK16-NEXT:  entry:
19359 // CHECK16-NEXT:    call void @__tgt_register_requires(i64 1)
19360 // CHECK16-NEXT:    ret void
19361 //
19362 //
19363 // CHECK17-LABEL: define {{[^@]+}}@main
19364 // CHECK17-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
19365 // CHECK17-NEXT:  entry:
19366 // CHECK17-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
19367 // CHECK17-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
19368 // CHECK17-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
19369 // CHECK17-NEXT:    [[N:%.*]] = alloca i32, align 4
19370 // CHECK17-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
19371 // CHECK17-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
19372 // CHECK17-NEXT:    [[M:%.*]] = alloca i32, align 4
19373 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19374 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
19375 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
19376 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
19377 // CHECK17-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
19378 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19379 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19380 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19381 // CHECK17-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
19382 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
19383 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
19384 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
19385 // CHECK17-NEXT:    [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
19386 // CHECK17-NEXT:    [[_TMP9:%.*]] = alloca i32, align 4
19387 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
19388 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
19389 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
19390 // CHECK17-NEXT:    [[N_CASTED19:%.*]] = alloca i64, align 8
19391 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
19392 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
19393 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
19394 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
19395 // CHECK17-NEXT:    [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
19396 // CHECK17-NEXT:    [[_TMP26:%.*]] = alloca i32, align 4
19397 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
19398 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
19399 // CHECK17-NEXT:    [[N_CASTED35:%.*]] = alloca i64, align 8
19400 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
19401 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
19402 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
19403 // CHECK17-NEXT:    [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
19404 // CHECK17-NEXT:    [[_TMP41:%.*]] = alloca i32, align 4
19405 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
19406 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
19407 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
19408 // CHECK17-NEXT:    [[N_CASTED51:%.*]] = alloca i64, align 8
19409 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
19410 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
19411 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
19412 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
19413 // CHECK17-NEXT:    [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
19414 // CHECK17-NEXT:    [[_TMP59:%.*]] = alloca i32, align 4
19415 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
19416 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
19417 // CHECK17-NEXT:    store i32 0, i32* [[RETVAL]], align 4
19418 // CHECK17-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
19419 // CHECK17-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
19420 // CHECK17-NEXT:    store i32 100, i32* [[N]], align 4
19421 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
19422 // CHECK17-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
19423 // CHECK17-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
19424 // CHECK17-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
19425 // CHECK17-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
19426 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
19427 // CHECK17-NEXT:    store i32 10, i32* [[M]], align 4
19428 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N]], align 4
19429 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19430 // CHECK17-NEXT:    store i32 [[TMP3]], i32* [[CONV]], align 4
19431 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
19432 // CHECK17-NEXT:    [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
19433 // CHECK17-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
19434 // CHECK17-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
19435 // CHECK17-NEXT:    store i64 [[TMP4]], i64* [[TMP7]], align 8
19436 // CHECK17-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
19437 // CHECK17-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
19438 // CHECK17-NEXT:    store i64 [[TMP4]], i64* [[TMP9]], align 8
19439 // CHECK17-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
19440 // CHECK17-NEXT:    store i64 4, i64* [[TMP10]], align 8
19441 // CHECK17-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
19442 // CHECK17-NEXT:    store i8* null, i8** [[TMP11]], align 8
19443 // CHECK17-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
19444 // CHECK17-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
19445 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP13]], align 8
19446 // CHECK17-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
19447 // CHECK17-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
19448 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP15]], align 8
19449 // CHECK17-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
19450 // CHECK17-NEXT:    store i64 8, i64* [[TMP16]], align 8
19451 // CHECK17-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
19452 // CHECK17-NEXT:    store i8* null, i8** [[TMP17]], align 8
19453 // CHECK17-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
19454 // CHECK17-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
19455 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 8
19456 // CHECK17-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
19457 // CHECK17-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
19458 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 8
19459 // CHECK17-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
19460 // CHECK17-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
19461 // CHECK17-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
19462 // CHECK17-NEXT:    store i8* null, i8** [[TMP23]], align 8
19463 // CHECK17-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
19464 // CHECK17-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
19465 // CHECK17-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
19466 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
19467 // CHECK17-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
19468 // CHECK17-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19469 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
19470 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19471 // CHECK17-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19472 // CHECK17-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19473 // CHECK17-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19474 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
19475 // CHECK17-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
19476 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
19477 // CHECK17-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
19478 // CHECK17-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
19479 // CHECK17-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
19480 // CHECK17:       omp_offload.failed:
19481 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
19482 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT]]
19483 // CHECK17:       omp_offload.cont:
19484 // CHECK17-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
19485 // CHECK17-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
19486 // CHECK17-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
19487 // CHECK17-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
19488 // CHECK17-NEXT:    [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
19489 // CHECK17-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
19490 // CHECK17-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
19491 // CHECK17-NEXT:    store i64 [[TMP34]], i64* [[TMP37]], align 8
19492 // CHECK17-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
19493 // CHECK17-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
19494 // CHECK17-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
19495 // CHECK17-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
19496 // CHECK17-NEXT:    store i64 4, i64* [[TMP40]], align 8
19497 // CHECK17-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
19498 // CHECK17-NEXT:    store i8* null, i8** [[TMP41]], align 8
19499 // CHECK17-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
19500 // CHECK17-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
19501 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP43]], align 8
19502 // CHECK17-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
19503 // CHECK17-NEXT:    [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
19504 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP45]], align 8
19505 // CHECK17-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
19506 // CHECK17-NEXT:    store i64 8, i64* [[TMP46]], align 8
19507 // CHECK17-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
19508 // CHECK17-NEXT:    store i8* null, i8** [[TMP47]], align 8
19509 // CHECK17-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
19510 // CHECK17-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
19511 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP49]], align 8
19512 // CHECK17-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
19513 // CHECK17-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
19514 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP51]], align 8
19515 // CHECK17-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
19516 // CHECK17-NEXT:    store i64 [[TMP35]], i64* [[TMP52]], align 8
19517 // CHECK17-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
19518 // CHECK17-NEXT:    store i8* null, i8** [[TMP53]], align 8
19519 // CHECK17-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
19520 // CHECK17-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
19521 // CHECK17-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
19522 // CHECK17-NEXT:    [[TMP57:%.*]] = load i32, i32* [[N]], align 4
19523 // CHECK17-NEXT:    store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
19524 // CHECK17-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
19525 // CHECK17-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
19526 // CHECK17-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
19527 // CHECK17-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
19528 // CHECK17-NEXT:    store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
19529 // CHECK17-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
19530 // CHECK17-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
19531 // CHECK17-NEXT:    [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
19532 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
19533 // CHECK17-NEXT:    [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
19534 // CHECK17-NEXT:    [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
19535 // CHECK17-NEXT:    br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
19536 // CHECK17:       omp_offload.failed16:
19537 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
19538 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
19539 // CHECK17:       omp_offload.cont17:
19540 // CHECK17-NEXT:    [[TMP63:%.*]] = load i32, i32* [[M]], align 4
19541 // CHECK17-NEXT:    store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
19542 // CHECK17-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N]], align 4
19543 // CHECK17-NEXT:    [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
19544 // CHECK17-NEXT:    store i32 [[TMP64]], i32* [[CONV20]], align 4
19545 // CHECK17-NEXT:    [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
19546 // CHECK17-NEXT:    [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
19547 // CHECK17-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
19548 // CHECK17-NEXT:    store i32 [[TMP66]], i32* [[CONV21]], align 4
19549 // CHECK17-NEXT:    [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
19550 // CHECK17-NEXT:    [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
19551 // CHECK17-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
19552 // CHECK17-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
19553 // CHECK17-NEXT:    store i64 [[TMP65]], i64* [[TMP70]], align 8
19554 // CHECK17-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
19555 // CHECK17-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
19556 // CHECK17-NEXT:    store i64 [[TMP65]], i64* [[TMP72]], align 8
19557 // CHECK17-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
19558 // CHECK17-NEXT:    store i64 4, i64* [[TMP73]], align 8
19559 // CHECK17-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
19560 // CHECK17-NEXT:    store i8* null, i8** [[TMP74]], align 8
19561 // CHECK17-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
19562 // CHECK17-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
19563 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP76]], align 8
19564 // CHECK17-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
19565 // CHECK17-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
19566 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP78]], align 8
19567 // CHECK17-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
19568 // CHECK17-NEXT:    store i64 8, i64* [[TMP79]], align 8
19569 // CHECK17-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
19570 // CHECK17-NEXT:    store i8* null, i8** [[TMP80]], align 8
19571 // CHECK17-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
19572 // CHECK17-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
19573 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP82]], align 8
19574 // CHECK17-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
19575 // CHECK17-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
19576 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 8
19577 // CHECK17-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
19578 // CHECK17-NEXT:    store i64 [[TMP68]], i64* [[TMP85]], align 8
19579 // CHECK17-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
19580 // CHECK17-NEXT:    store i8* null, i8** [[TMP86]], align 8
19581 // CHECK17-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
19582 // CHECK17-NEXT:    [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
19583 // CHECK17-NEXT:    store i64 [[TMP67]], i64* [[TMP88]], align 8
19584 // CHECK17-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
19585 // CHECK17-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
19586 // CHECK17-NEXT:    store i64 [[TMP67]], i64* [[TMP90]], align 8
19587 // CHECK17-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
19588 // CHECK17-NEXT:    store i64 4, i64* [[TMP91]], align 8
19589 // CHECK17-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
19590 // CHECK17-NEXT:    store i8* null, i8** [[TMP92]], align 8
19591 // CHECK17-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
19592 // CHECK17-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
19593 // CHECK17-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
19594 // CHECK17-NEXT:    [[TMP96:%.*]] = load i32, i32* [[N]], align 4
19595 // CHECK17-NEXT:    store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
19596 // CHECK17-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
19597 // CHECK17-NEXT:    [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
19598 // CHECK17-NEXT:    [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
19599 // CHECK17-NEXT:    [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
19600 // CHECK17-NEXT:    store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
19601 // CHECK17-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
19602 // CHECK17-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
19603 // CHECK17-NEXT:    [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
19604 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
19605 // CHECK17-NEXT:    [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
19606 // CHECK17-NEXT:    [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
19607 // CHECK17-NEXT:    br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
19608 // CHECK17:       omp_offload.failed33:
19609 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
19610 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT34]]
19611 // CHECK17:       omp_offload.cont34:
19612 // CHECK17-NEXT:    [[TMP102:%.*]] = load i32, i32* [[N]], align 4
19613 // CHECK17-NEXT:    [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
19614 // CHECK17-NEXT:    store i32 [[TMP102]], i32* [[CONV36]], align 4
19615 // CHECK17-NEXT:    [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
19616 // CHECK17-NEXT:    [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
19617 // CHECK17-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
19618 // CHECK17-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
19619 // CHECK17-NEXT:    store i64 [[TMP103]], i64* [[TMP106]], align 8
19620 // CHECK17-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
19621 // CHECK17-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
19622 // CHECK17-NEXT:    store i64 [[TMP103]], i64* [[TMP108]], align 8
19623 // CHECK17-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
19624 // CHECK17-NEXT:    store i64 4, i64* [[TMP109]], align 8
19625 // CHECK17-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
19626 // CHECK17-NEXT:    store i8* null, i8** [[TMP110]], align 8
19627 // CHECK17-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
19628 // CHECK17-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
19629 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP112]], align 8
19630 // CHECK17-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
19631 // CHECK17-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
19632 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP114]], align 8
19633 // CHECK17-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
19634 // CHECK17-NEXT:    store i64 8, i64* [[TMP115]], align 8
19635 // CHECK17-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
19636 // CHECK17-NEXT:    store i8* null, i8** [[TMP116]], align 8
19637 // CHECK17-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
19638 // CHECK17-NEXT:    [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
19639 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP118]], align 8
19640 // CHECK17-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
19641 // CHECK17-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
19642 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP120]], align 8
19643 // CHECK17-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
19644 // CHECK17-NEXT:    store i64 [[TMP104]], i64* [[TMP121]], align 8
19645 // CHECK17-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
19646 // CHECK17-NEXT:    store i8* null, i8** [[TMP122]], align 8
19647 // CHECK17-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
19648 // CHECK17-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
19649 // CHECK17-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
19650 // CHECK17-NEXT:    [[TMP126:%.*]] = load i32, i32* [[N]], align 4
19651 // CHECK17-NEXT:    store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
19652 // CHECK17-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
19653 // CHECK17-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
19654 // CHECK17-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
19655 // CHECK17-NEXT:    [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
19656 // CHECK17-NEXT:    store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
19657 // CHECK17-NEXT:    [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
19658 // CHECK17-NEXT:    [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
19659 // CHECK17-NEXT:    [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
19660 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
19661 // CHECK17-NEXT:    [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
19662 // CHECK17-NEXT:    [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
19663 // CHECK17-NEXT:    br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
19664 // CHECK17:       omp_offload.failed48:
19665 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
19666 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT49]]
19667 // CHECK17:       omp_offload.cont49:
19668 // CHECK17-NEXT:    [[TMP132:%.*]] = load i32, i32* [[M]], align 4
19669 // CHECK17-NEXT:    store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
19670 // CHECK17-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
19671 // CHECK17-NEXT:    [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
19672 // CHECK17-NEXT:    store i32 [[TMP133]], i32* [[CONV52]], align 4
19673 // CHECK17-NEXT:    [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
19674 // CHECK17-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
19675 // CHECK17-NEXT:    [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
19676 // CHECK17-NEXT:    store i32 [[TMP135]], i32* [[CONV54]], align 4
19677 // CHECK17-NEXT:    [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
19678 // CHECK17-NEXT:    [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
19679 // CHECK17-NEXT:    [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
19680 // CHECK17-NEXT:    [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
19681 // CHECK17-NEXT:    store i64 [[TMP134]], i64* [[TMP139]], align 8
19682 // CHECK17-NEXT:    [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
19683 // CHECK17-NEXT:    [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
19684 // CHECK17-NEXT:    store i64 [[TMP134]], i64* [[TMP141]], align 8
19685 // CHECK17-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
19686 // CHECK17-NEXT:    store i64 4, i64* [[TMP142]], align 8
19687 // CHECK17-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
19688 // CHECK17-NEXT:    store i8* null, i8** [[TMP143]], align 8
19689 // CHECK17-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
19690 // CHECK17-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
19691 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP145]], align 8
19692 // CHECK17-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
19693 // CHECK17-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
19694 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP147]], align 8
19695 // CHECK17-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
19696 // CHECK17-NEXT:    store i64 8, i64* [[TMP148]], align 8
19697 // CHECK17-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
19698 // CHECK17-NEXT:    store i8* null, i8** [[TMP149]], align 8
19699 // CHECK17-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
19700 // CHECK17-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
19701 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP151]], align 8
19702 // CHECK17-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
19703 // CHECK17-NEXT:    [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
19704 // CHECK17-NEXT:    store i32* [[VLA]], i32** [[TMP153]], align 8
19705 // CHECK17-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
19706 // CHECK17-NEXT:    store i64 [[TMP137]], i64* [[TMP154]], align 8
19707 // CHECK17-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
19708 // CHECK17-NEXT:    store i8* null, i8** [[TMP155]], align 8
19709 // CHECK17-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
19710 // CHECK17-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
19711 // CHECK17-NEXT:    store i64 [[TMP136]], i64* [[TMP157]], align 8
19712 // CHECK17-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
19713 // CHECK17-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
19714 // CHECK17-NEXT:    store i64 [[TMP136]], i64* [[TMP159]], align 8
19715 // CHECK17-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
19716 // CHECK17-NEXT:    store i64 4, i64* [[TMP160]], align 8
19717 // CHECK17-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
19718 // CHECK17-NEXT:    store i8* null, i8** [[TMP161]], align 8
19719 // CHECK17-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
19720 // CHECK17-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
19721 // CHECK17-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
19722 // CHECK17-NEXT:    [[TMP165:%.*]] = load i32, i32* [[N]], align 4
19723 // CHECK17-NEXT:    store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
19724 // CHECK17-NEXT:    [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
19725 // CHECK17-NEXT:    [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
19726 // CHECK17-NEXT:    [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
19727 // CHECK17-NEXT:    [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
19728 // CHECK17-NEXT:    store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
19729 // CHECK17-NEXT:    [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
19730 // CHECK17-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
19731 // CHECK17-NEXT:    [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
19732 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
19733 // CHECK17-NEXT:    [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
19734 // CHECK17-NEXT:    [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
19735 // CHECK17-NEXT:    br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
19736 // CHECK17:       omp_offload.failed66:
19737 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
19738 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT67]]
19739 // CHECK17:       omp_offload.cont67:
19740 // CHECK17-NEXT:    [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
19741 // CHECK17-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
19742 // CHECK17-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
19743 // CHECK17-NEXT:    [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
19744 // CHECK17-NEXT:    call void @llvm.stackrestore(i8* [[TMP172]])
19745 // CHECK17-NEXT:    [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
19746 // CHECK17-NEXT:    ret i32 [[TMP173]]
19747 //
19748 //
19749 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
19750 // CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
19751 // CHECK17-NEXT:  entry:
19752 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19753 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
19754 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
19755 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19756 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19757 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
19758 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
19759 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19760 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19761 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
19762 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
19763 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19764 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
19765 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
19766 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
19767 // CHECK17-NEXT:    ret void
19768 //
19769 //
19770 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
19771 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
19772 // CHECK17-NEXT:  entry:
19773 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19774 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19775 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19776 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
19777 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
19778 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19779 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19780 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19781 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19782 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
19783 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19784 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19785 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19786 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19787 // CHECK17-NEXT:    [[I3:%.*]] = alloca i32, align 4
19788 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19789 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19790 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19791 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19792 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
19793 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
19794 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19795 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19796 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
19797 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
19798 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
19799 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19800 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
19801 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19802 // CHECK17-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19803 // CHECK17-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19804 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
19805 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19806 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
19807 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19808 // CHECK17:       omp.precond.then:
19809 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19810 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19811 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
19812 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19813 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19814 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19815 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
19816 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19817 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19818 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19819 // CHECK17-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
19820 // CHECK17-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19821 // CHECK17:       cond.true:
19822 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19823 // CHECK17-NEXT:    br label [[COND_END:%.*]]
19824 // CHECK17:       cond.false:
19825 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19826 // CHECK17-NEXT:    br label [[COND_END]]
19827 // CHECK17:       cond.end:
19828 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
19829 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19830 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19831 // CHECK17-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
19832 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19833 // CHECK17:       omp.inner.for.cond:
19834 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
19835 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
19836 // CHECK17-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
19837 // CHECK17-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19838 // CHECK17:       omp.inner.for.body:
19839 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !13
19840 // CHECK17-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
19841 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
19842 // CHECK17-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
19843 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !13
19844 // CHECK17-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19845 // CHECK17-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !13
19846 // CHECK17-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !13
19847 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !13
19848 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19849 // CHECK17:       omp.inner.for.inc:
19850 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
19851 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !13
19852 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
19853 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
19854 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
19855 // CHECK17:       omp.inner.for.end:
19856 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19857 // CHECK17:       omp.loop.exit:
19858 // CHECK17-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19859 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
19860 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
19861 // CHECK17-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19862 // CHECK17-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
19863 // CHECK17-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19864 // CHECK17:       .omp.final.then:
19865 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19866 // CHECK17-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
19867 // CHECK17-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
19868 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
19869 // CHECK17-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
19870 // CHECK17-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
19871 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19872 // CHECK17:       .omp.final.done:
19873 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
19874 // CHECK17:       omp.precond.end:
19875 // CHECK17-NEXT:    ret void
19876 //
19877 //
19878 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1
19879 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
19880 // CHECK17-NEXT:  entry:
19881 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19882 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19883 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
19884 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
19885 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19886 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
19887 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
19888 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19889 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19890 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19891 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19892 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
19893 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19894 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19895 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19896 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19897 // CHECK17-NEXT:    [[I5:%.*]] = alloca i32, align 4
19898 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19899 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19900 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19901 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19902 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19903 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
19904 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
19905 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19906 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19907 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
19908 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
19909 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
19910 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19911 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
19912 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19913 // CHECK17-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19914 // CHECK17-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19915 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
19916 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19917 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
19918 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19919 // CHECK17:       omp.precond.then:
19920 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19921 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19922 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
19923 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19924 // CHECK17-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
19925 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19926 // CHECK17-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
19927 // CHECK17-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
19928 // CHECK17-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
19929 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19930 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19931 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19932 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19933 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19934 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19935 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19936 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
19937 // CHECK17-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19938 // CHECK17:       cond.true:
19939 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19940 // CHECK17-NEXT:    br label [[COND_END:%.*]]
19941 // CHECK17:       cond.false:
19942 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19943 // CHECK17-NEXT:    br label [[COND_END]]
19944 // CHECK17:       cond.end:
19945 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
19946 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19947 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19948 // CHECK17-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
19949 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19950 // CHECK17:       omp.inner.for.cond:
19951 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
19952 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !17
19953 // CHECK17-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
19954 // CHECK17-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19955 // CHECK17:       omp.inner.for.body:
19956 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
19957 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
19958 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19959 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !17
19960 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !17
19961 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
19962 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
19963 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !17
19964 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19965 // CHECK17:       omp.body.continue:
19966 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19967 // CHECK17:       omp.inner.for.inc:
19968 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
19969 // CHECK17-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
19970 // CHECK17-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
19971 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
19972 // CHECK17:       omp.inner.for.end:
19973 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19974 // CHECK17:       omp.loop.exit:
19975 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19976 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
19977 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
19978 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19979 // CHECK17-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
19980 // CHECK17-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19981 // CHECK17:       .omp.final.then:
19982 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19983 // CHECK17-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
19984 // CHECK17-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
19985 // CHECK17-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
19986 // CHECK17-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
19987 // CHECK17-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
19988 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19989 // CHECK17:       .omp.final.done:
19990 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
19991 // CHECK17:       omp.precond.end:
19992 // CHECK17-NEXT:    ret void
19993 //
19994 //
19995 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
19996 // CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
19997 // CHECK17-NEXT:  entry:
19998 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19999 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20000 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20001 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20002 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20003 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20004 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20005 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20006 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20007 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20008 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20009 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20010 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
20011 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
20012 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
20013 // CHECK17-NEXT:    ret void
20014 //
20015 //
20016 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2
20017 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
20018 // CHECK17-NEXT:  entry:
20019 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20020 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20021 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20022 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20023 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20024 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20025 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20026 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20027 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20028 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20029 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20030 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20031 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20032 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20033 // CHECK17-NEXT:    [[I3:%.*]] = alloca i32, align 4
20034 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20035 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20036 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20037 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20038 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20039 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20040 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20041 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20042 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20043 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20044 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
20045 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20046 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20047 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20048 // CHECK17-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20049 // CHECK17-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20050 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20051 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20052 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20053 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20054 // CHECK17:       omp.precond.then:
20055 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20056 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20057 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
20058 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20059 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20060 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20061 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
20062 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20063 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20064 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20065 // CHECK17-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
20066 // CHECK17-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20067 // CHECK17:       cond.true:
20068 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20069 // CHECK17-NEXT:    br label [[COND_END:%.*]]
20070 // CHECK17:       cond.false:
20071 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20072 // CHECK17-NEXT:    br label [[COND_END]]
20073 // CHECK17:       cond.end:
20074 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
20075 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20076 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20077 // CHECK17-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
20078 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20079 // CHECK17:       omp.inner.for.cond:
20080 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
20081 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
20082 // CHECK17-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
20083 // CHECK17-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20084 // CHECK17:       omp.inner.for.body:
20085 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !22
20086 // CHECK17-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
20087 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
20088 // CHECK17-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
20089 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !22
20090 // CHECK17-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20091 // CHECK17-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !22
20092 // CHECK17-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !22
20093 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !22
20094 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20095 // CHECK17:       omp.inner.for.inc:
20096 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
20097 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !22
20098 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
20099 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
20100 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
20101 // CHECK17:       omp.inner.for.end:
20102 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20103 // CHECK17:       omp.loop.exit:
20104 // CHECK17-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20105 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
20106 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
20107 // CHECK17-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20108 // CHECK17-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
20109 // CHECK17-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20110 // CHECK17:       .omp.final.then:
20111 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20112 // CHECK17-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
20113 // CHECK17-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
20114 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
20115 // CHECK17-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
20116 // CHECK17-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
20117 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20118 // CHECK17:       .omp.final.done:
20119 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
20120 // CHECK17:       omp.precond.end:
20121 // CHECK17-NEXT:    ret void
20122 //
20123 //
20124 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..3
20125 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
20126 // CHECK17-NEXT:  entry:
20127 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20128 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20129 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20130 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20131 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20132 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20133 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20134 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20135 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20136 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20137 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20138 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20139 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20140 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20141 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20142 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20143 // CHECK17-NEXT:    [[I5:%.*]] = alloca i32, align 4
20144 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20145 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20146 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20147 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20148 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20149 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20150 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20151 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20152 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20153 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20154 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20155 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
20156 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20157 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20158 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20159 // CHECK17-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20160 // CHECK17-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20161 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20162 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20163 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20164 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20165 // CHECK17:       omp.precond.then:
20166 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20167 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20168 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
20169 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20170 // CHECK17-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
20171 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20172 // CHECK17-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
20173 // CHECK17-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
20174 // CHECK17-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
20175 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20176 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20177 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20178 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
20179 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20180 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20181 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20182 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
20183 // CHECK17-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20184 // CHECK17:       cond.true:
20185 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20186 // CHECK17-NEXT:    br label [[COND_END:%.*]]
20187 // CHECK17:       cond.false:
20188 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20189 // CHECK17-NEXT:    br label [[COND_END]]
20190 // CHECK17:       cond.end:
20191 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
20192 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20193 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20194 // CHECK17-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
20195 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20196 // CHECK17:       omp.inner.for.cond:
20197 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20198 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
20199 // CHECK17-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
20200 // CHECK17-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20201 // CHECK17:       omp.inner.for.body:
20202 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20203 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
20204 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20205 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !25
20206 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !25
20207 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
20208 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
20209 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
20210 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20211 // CHECK17:       omp.body.continue:
20212 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20213 // CHECK17:       omp.inner.for.inc:
20214 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20215 // CHECK17-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
20216 // CHECK17-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20217 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
20218 // CHECK17:       omp.inner.for.end:
20219 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20220 // CHECK17:       omp.loop.exit:
20221 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20222 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
20223 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
20224 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20225 // CHECK17-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
20226 // CHECK17-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20227 // CHECK17:       .omp.final.then:
20228 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20229 // CHECK17-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
20230 // CHECK17-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
20231 // CHECK17-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
20232 // CHECK17-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
20233 // CHECK17-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
20234 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20235 // CHECK17:       .omp.final.done:
20236 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
20237 // CHECK17:       omp.precond.end:
20238 // CHECK17-NEXT:    ret void
20239 //
20240 //
20241 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
20242 // CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
20243 // CHECK17-NEXT:  entry:
20244 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20245 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20246 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20247 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20248 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20249 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
20250 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20251 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20252 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20253 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20254 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20255 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20256 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20257 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
20258 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20259 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20260 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
20261 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
20262 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
20263 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
20264 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
20265 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
20266 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
20267 // CHECK17-NEXT:    ret void
20268 //
20269 //
20270 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5
20271 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
20272 // CHECK17-NEXT:  entry:
20273 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20274 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20275 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20276 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20277 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20278 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20279 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20280 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20281 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20282 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
20283 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20284 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20285 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20286 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20287 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20288 // CHECK17-NEXT:    [[I5:%.*]] = alloca i32, align 4
20289 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20290 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
20291 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20292 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20293 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20294 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20295 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20296 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20297 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20298 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20299 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20300 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
20301 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20302 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20303 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20304 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20305 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20306 // CHECK17-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
20307 // CHECK17-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
20308 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20309 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20310 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20311 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20312 // CHECK17:       omp.precond.then:
20313 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20314 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20315 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
20316 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20317 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20318 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
20319 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20320 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
20321 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
20322 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20323 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20324 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
20325 // CHECK17-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20326 // CHECK17:       cond.true:
20327 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20328 // CHECK17-NEXT:    br label [[COND_END:%.*]]
20329 // CHECK17:       cond.false:
20330 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20331 // CHECK17-NEXT:    br label [[COND_END]]
20332 // CHECK17:       cond.end:
20333 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
20334 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20335 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20336 // CHECK17-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
20337 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20338 // CHECK17:       omp.inner.for.cond:
20339 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
20340 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
20341 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
20342 // CHECK17-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
20343 // CHECK17-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20344 // CHECK17:       omp.inner.for.body:
20345 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
20346 // CHECK17-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
20347 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
20348 // CHECK17-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
20349 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !28
20350 // CHECK17-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20351 // CHECK17-NEXT:    store i32 [[TMP20]], i32* [[CONV8]], align 4, !llvm.access.group !28
20352 // CHECK17-NEXT:    [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !28
20353 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !28
20354 // CHECK17-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
20355 // CHECK17-NEXT:    store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28
20356 // CHECK17-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28
20357 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28
20358 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20359 // CHECK17:       omp.inner.for.inc:
20360 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
20361 // CHECK17-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
20362 // CHECK17-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
20363 // CHECK17-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
20364 // CHECK17-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
20365 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
20366 // CHECK17-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
20367 // CHECK17-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
20368 // CHECK17-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
20369 // CHECK17-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
20370 // CHECK17-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
20371 // CHECK17-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
20372 // CHECK17-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
20373 // CHECK17-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
20374 // CHECK17-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
20375 // CHECK17-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
20376 // CHECK17:       cond.true14:
20377 // CHECK17-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
20378 // CHECK17-NEXT:    br label [[COND_END16:%.*]]
20379 // CHECK17:       cond.false15:
20380 // CHECK17-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
20381 // CHECK17-NEXT:    br label [[COND_END16]]
20382 // CHECK17:       cond.end16:
20383 // CHECK17-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
20384 // CHECK17-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
20385 // CHECK17-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
20386 // CHECK17-NEXT:    store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
20387 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
20388 // CHECK17:       omp.inner.for.end:
20389 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20390 // CHECK17:       omp.loop.exit:
20391 // CHECK17-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20392 // CHECK17-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
20393 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
20394 // CHECK17-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20395 // CHECK17-NEXT:    [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0
20396 // CHECK17-NEXT:    br i1 [[TMP38]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20397 // CHECK17:       .omp.final.then:
20398 // CHECK17-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20399 // CHECK17-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP39]], 0
20400 // CHECK17-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
20401 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV19]], 1
20402 // CHECK17-NEXT:    [[ADD20:%.*]] = add nsw i32 0, [[MUL]]
20403 // CHECK17-NEXT:    store i32 [[ADD20]], i32* [[I5]], align 4
20404 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20405 // CHECK17:       .omp.final.done:
20406 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
20407 // CHECK17:       omp.precond.end:
20408 // CHECK17-NEXT:    ret void
20409 //
20410 //
20411 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6
20412 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
20413 // CHECK17-NEXT:  entry:
20414 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20415 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20416 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20417 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20418 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20419 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20420 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20421 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20422 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20423 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20424 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20425 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
20426 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20427 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20428 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20429 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20430 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20431 // CHECK17-NEXT:    [[I7:%.*]] = alloca i32, align 4
20432 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20433 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20434 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20435 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20436 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20437 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20438 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20439 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20440 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20441 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20442 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20443 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
20444 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20445 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20446 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20447 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20448 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20449 // CHECK17-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
20450 // CHECK17-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
20451 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20452 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20453 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20454 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20455 // CHECK17:       omp.precond.then:
20456 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20457 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20458 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
20459 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20460 // CHECK17-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
20461 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20462 // CHECK17-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
20463 // CHECK17-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
20464 // CHECK17-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
20465 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20466 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20467 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20468 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
20469 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20470 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20471 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20472 // CHECK17-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
20473 // CHECK17-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20474 // CHECK17:       cond.true:
20475 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20476 // CHECK17-NEXT:    br label [[COND_END:%.*]]
20477 // CHECK17:       cond.false:
20478 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20479 // CHECK17-NEXT:    br label [[COND_END]]
20480 // CHECK17:       cond.end:
20481 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
20482 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20483 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20484 // CHECK17-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
20485 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20486 // CHECK17:       omp.inner.for.cond:
20487 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
20488 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
20489 // CHECK17-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
20490 // CHECK17-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20491 // CHECK17:       omp.inner.for.body:
20492 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
20493 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
20494 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20495 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !31
20496 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !31
20497 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
20498 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
20499 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !31
20500 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20501 // CHECK17:       omp.body.continue:
20502 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20503 // CHECK17:       omp.inner.for.inc:
20504 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
20505 // CHECK17-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
20506 // CHECK17-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
20507 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
20508 // CHECK17:       omp.inner.for.end:
20509 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20510 // CHECK17:       omp.loop.exit:
20511 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20512 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
20513 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
20514 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20515 // CHECK17-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
20516 // CHECK17-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20517 // CHECK17:       .omp.final.then:
20518 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20519 // CHECK17-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP24]], 0
20520 // CHECK17-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
20521 // CHECK17-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
20522 // CHECK17-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
20523 // CHECK17-NEXT:    store i32 [[ADD14]], i32* [[I7]], align 4
20524 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20525 // CHECK17:       .omp.final.done:
20526 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
20527 // CHECK17:       omp.precond.end:
20528 // CHECK17-NEXT:    ret void
20529 //
20530 //
20531 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
20532 // CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
20533 // CHECK17-NEXT:  entry:
20534 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20535 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20536 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20537 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20538 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20539 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20540 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20541 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20542 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20543 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20544 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20545 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20546 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
20547 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
20548 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
20549 // CHECK17-NEXT:    ret void
20550 //
20551 //
20552 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..8
20553 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
20554 // CHECK17-NEXT:  entry:
20555 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20556 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20557 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20558 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20559 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20560 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20561 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20562 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20563 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20564 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20565 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20566 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20567 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20568 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20569 // CHECK17-NEXT:    [[I3:%.*]] = alloca i32, align 4
20570 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20571 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20572 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20573 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20574 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20575 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20576 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20577 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20578 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20579 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20580 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
20581 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20582 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20583 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20584 // CHECK17-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20585 // CHECK17-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20586 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20587 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20588 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20589 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20590 // CHECK17:       omp.precond.then:
20591 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20592 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20593 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
20594 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20595 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20596 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20597 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
20598 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20599 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20600 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20601 // CHECK17-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
20602 // CHECK17-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20603 // CHECK17:       cond.true:
20604 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20605 // CHECK17-NEXT:    br label [[COND_END:%.*]]
20606 // CHECK17:       cond.false:
20607 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20608 // CHECK17-NEXT:    br label [[COND_END]]
20609 // CHECK17:       cond.end:
20610 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
20611 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20612 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20613 // CHECK17-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
20614 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20615 // CHECK17:       omp.inner.for.cond:
20616 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
20617 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
20618 // CHECK17-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
20619 // CHECK17-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20620 // CHECK17:       omp.inner.for.body:
20621 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !34
20622 // CHECK17-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
20623 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
20624 // CHECK17-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
20625 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !34
20626 // CHECK17-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20627 // CHECK17-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34
20628 // CHECK17-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34
20629 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34
20630 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20631 // CHECK17:       omp.inner.for.inc:
20632 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
20633 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !34
20634 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
20635 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
20636 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
20637 // CHECK17:       omp.inner.for.end:
20638 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20639 // CHECK17:       omp.loop.exit:
20640 // CHECK17-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20641 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
20642 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
20643 // CHECK17-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20644 // CHECK17-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
20645 // CHECK17-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20646 // CHECK17:       .omp.final.then:
20647 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20648 // CHECK17-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
20649 // CHECK17-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
20650 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
20651 // CHECK17-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
20652 // CHECK17-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
20653 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20654 // CHECK17:       .omp.final.done:
20655 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
20656 // CHECK17:       omp.precond.end:
20657 // CHECK17-NEXT:    ret void
20658 //
20659 //
20660 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9
20661 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
20662 // CHECK17-NEXT:  entry:
20663 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20664 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20665 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20666 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20667 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20668 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20669 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20670 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20671 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20672 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20673 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20674 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20675 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20676 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20677 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20678 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20679 // CHECK17-NEXT:    [[I5:%.*]] = alloca i32, align 4
20680 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20681 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20682 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20683 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20684 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20685 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20686 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20687 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20688 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20689 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20690 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20691 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
20692 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20693 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20694 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20695 // CHECK17-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20696 // CHECK17-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20697 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20698 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20699 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20700 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20701 // CHECK17:       omp.precond.then:
20702 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20703 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20704 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
20705 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20706 // CHECK17-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
20707 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20708 // CHECK17-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
20709 // CHECK17-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
20710 // CHECK17-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
20711 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20712 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20713 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20714 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20715 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20716 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
20717 // CHECK17-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
20718 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
20719 // CHECK17:       omp.dispatch.cond:
20720 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20721 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
20722 // CHECK17-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
20723 // CHECK17-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
20724 // CHECK17-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
20725 // CHECK17:       omp.dispatch.body:
20726 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20727 // CHECK17-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
20728 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20729 // CHECK17:       omp.inner.for.cond:
20730 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
20731 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !37
20732 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
20733 // CHECK17-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20734 // CHECK17:       omp.inner.for.body:
20735 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
20736 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
20737 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20738 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !37
20739 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !37
20740 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
20741 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
20742 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !37
20743 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20744 // CHECK17:       omp.body.continue:
20745 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20746 // CHECK17:       omp.inner.for.inc:
20747 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
20748 // CHECK17-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
20749 // CHECK17-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
20750 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
20751 // CHECK17:       omp.inner.for.end:
20752 // CHECK17-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
20753 // CHECK17:       omp.dispatch.inc:
20754 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND]]
20755 // CHECK17:       omp.dispatch.end:
20756 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20757 // CHECK17-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
20758 // CHECK17-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20759 // CHECK17:       .omp.final.then:
20760 // CHECK17-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20761 // CHECK17-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP23]], 0
20762 // CHECK17-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
20763 // CHECK17-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
20764 // CHECK17-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
20765 // CHECK17-NEXT:    store i32 [[ADD11]], i32* [[I5]], align 4
20766 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20767 // CHECK17:       .omp.final.done:
20768 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
20769 // CHECK17:       omp.precond.end:
20770 // CHECK17-NEXT:    ret void
20771 //
20772 //
20773 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
20774 // CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
20775 // CHECK17-NEXT:  entry:
20776 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20777 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20778 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20779 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20780 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20781 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
20782 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20783 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20784 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20785 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20786 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20787 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20788 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20789 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
20790 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20791 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20792 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
20793 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
20794 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
20795 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
20796 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
20797 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
20798 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
20799 // CHECK17-NEXT:    ret void
20800 //
20801 //
20802 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11
20803 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
20804 // CHECK17-NEXT:  entry:
20805 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20806 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20807 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20808 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20809 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20810 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20811 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20812 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20813 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20814 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
20815 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20816 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20817 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20818 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20819 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20820 // CHECK17-NEXT:    [[I5:%.*]] = alloca i32, align 4
20821 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20822 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
20823 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20824 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20825 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20826 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20827 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20828 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20829 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20830 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20831 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20832 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
20833 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20834 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20835 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20836 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20837 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20838 // CHECK17-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
20839 // CHECK17-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
20840 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20841 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20842 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20843 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20844 // CHECK17:       omp.precond.then:
20845 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20846 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20847 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
20848 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20849 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20850 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20851 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
20852 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20853 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20854 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20855 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
20856 // CHECK17-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20857 // CHECK17:       cond.true:
20858 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20859 // CHECK17-NEXT:    br label [[COND_END:%.*]]
20860 // CHECK17:       cond.false:
20861 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20862 // CHECK17-NEXT:    br label [[COND_END]]
20863 // CHECK17:       cond.end:
20864 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
20865 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20866 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20867 // CHECK17-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
20868 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20869 // CHECK17:       omp.inner.for.cond:
20870 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
20871 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
20872 // CHECK17-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
20873 // CHECK17-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20874 // CHECK17:       omp.inner.for.body:
20875 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !40
20876 // CHECK17-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
20877 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
20878 // CHECK17-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
20879 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !40
20880 // CHECK17-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20881 // CHECK17-NEXT:    store i32 [[TMP19]], i32* [[CONV8]], align 4, !llvm.access.group !40
20882 // CHECK17-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !40
20883 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !40
20884 // CHECK17-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
20885 // CHECK17-NEXT:    store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40
20886 // CHECK17-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40
20887 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40
20888 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20889 // CHECK17:       omp.inner.for.inc:
20890 // CHECK17-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
20891 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !40
20892 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
20893 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
20894 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
20895 // CHECK17:       omp.inner.for.end:
20896 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20897 // CHECK17:       omp.loop.exit:
20898 // CHECK17-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20899 // CHECK17-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
20900 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
20901 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20902 // CHECK17-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
20903 // CHECK17-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20904 // CHECK17:       .omp.final.then:
20905 // CHECK17-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20906 // CHECK17-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP29]], 0
20907 // CHECK17-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
20908 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV11]], 1
20909 // CHECK17-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL]]
20910 // CHECK17-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
20911 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20912 // CHECK17:       .omp.final.done:
20913 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
20914 // CHECK17:       omp.precond.end:
20915 // CHECK17-NEXT:    ret void
20916 //
20917 //
20918 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12
20919 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
20920 // CHECK17-NEXT:  entry:
20921 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20922 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20923 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20924 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20925 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20926 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20927 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
20928 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20929 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20930 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20931 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20932 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
20933 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
20934 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20935 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20936 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20937 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20938 // CHECK17-NEXT:    [[I7:%.*]] = alloca i32, align 4
20939 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20940 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20941 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20942 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20943 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20944 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20945 // CHECK17-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
20946 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20947 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20948 // CHECK17-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20949 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
20950 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
20951 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
20952 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20953 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20954 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
20955 // CHECK17-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20956 // CHECK17-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
20957 // CHECK17-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
20958 // CHECK17-NEXT:    store i32 0, i32* [[I]], align 4
20959 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20960 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
20961 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20962 // CHECK17:       omp.precond.then:
20963 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20964 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20965 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
20966 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20967 // CHECK17-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
20968 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20969 // CHECK17-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
20970 // CHECK17-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
20971 // CHECK17-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
20972 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20973 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20974 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
20975 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20976 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20977 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20978 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
20979 // CHECK17-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
20980 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
20981 // CHECK17:       omp.dispatch.cond:
20982 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20983 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
20984 // CHECK17-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
20985 // CHECK17-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
20986 // CHECK17-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
20987 // CHECK17:       omp.dispatch.body:
20988 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20989 // CHECK17-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
20990 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20991 // CHECK17:       omp.inner.for.cond:
20992 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
20993 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43
20994 // CHECK17-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
20995 // CHECK17-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20996 // CHECK17:       omp.inner.for.body:
20997 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
20998 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
20999 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21000 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !43
21001 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !43
21002 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
21003 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
21004 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !43
21005 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21006 // CHECK17:       omp.body.continue:
21007 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21008 // CHECK17:       omp.inner.for.inc:
21009 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
21010 // CHECK17-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
21011 // CHECK17-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
21012 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
21013 // CHECK17:       omp.inner.for.end:
21014 // CHECK17-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
21015 // CHECK17:       omp.dispatch.inc:
21016 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND]]
21017 // CHECK17:       omp.dispatch.end:
21018 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21019 // CHECK17-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
21020 // CHECK17-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21021 // CHECK17:       .omp.final.then:
21022 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21023 // CHECK17-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP24]], 0
21024 // CHECK17-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21025 // CHECK17-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21026 // CHECK17-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21027 // CHECK17-NEXT:    store i32 [[ADD13]], i32* [[I7]], align 4
21028 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21029 // CHECK17:       .omp.final.done:
21030 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
21031 // CHECK17:       omp.precond.end:
21032 // CHECK17-NEXT:    ret void
21033 //
21034 //
21035 // CHECK17-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
21036 // CHECK17-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
21037 // CHECK17-NEXT:  entry:
21038 // CHECK17-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
21039 // CHECK17-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
21040 // CHECK17-NEXT:    [[M:%.*]] = alloca i32, align 4
21041 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
21042 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
21043 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
21044 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21045 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
21046 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
21047 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
21048 // CHECK17-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
21049 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21050 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
21051 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
21052 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
21053 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
21054 // CHECK17-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
21055 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
21056 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
21057 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
21058 // CHECK17-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
21059 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
21060 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
21061 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
21062 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
21063 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
21064 // CHECK17-NEXT:    [[_TMP25:%.*]] = alloca i32, align 4
21065 // CHECK17-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
21066 // CHECK17-NEXT:    store i32 10, i32* [[M]], align 4
21067 // CHECK17-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
21068 // CHECK17-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
21069 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
21070 // CHECK17-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
21071 // CHECK17-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
21072 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
21073 // CHECK17-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
21074 // CHECK17-NEXT:    store i8* null, i8** [[TMP4]], align 8
21075 // CHECK17-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
21076 // CHECK17-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
21077 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
21078 // CHECK17-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
21079 // CHECK17-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
21080 // CHECK17-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
21081 // CHECK17:       omp_offload.failed:
21082 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
21083 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT]]
21084 // CHECK17:       omp_offload.cont:
21085 // CHECK17-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
21086 // CHECK17-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
21087 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
21088 // CHECK17-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
21089 // CHECK17-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
21090 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
21091 // CHECK17-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
21092 // CHECK17-NEXT:    store i8* null, i8** [[TMP13]], align 8
21093 // CHECK17-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
21094 // CHECK17-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
21095 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
21096 // CHECK17-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
21097 // CHECK17-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
21098 // CHECK17-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
21099 // CHECK17:       omp_offload.failed5:
21100 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
21101 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
21102 // CHECK17:       omp_offload.cont6:
21103 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
21104 // CHECK17-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
21105 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21106 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
21107 // CHECK17-NEXT:    store i32 [[TMP19]], i32* [[CONV]], align 4
21108 // CHECK17-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
21109 // CHECK17-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
21110 // CHECK17-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
21111 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
21112 // CHECK17-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
21113 // CHECK17-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
21114 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
21115 // CHECK17-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
21116 // CHECK17-NEXT:    store i8* null, i8** [[TMP25]], align 8
21117 // CHECK17-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
21118 // CHECK17-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
21119 // CHECK17-NEXT:    store i64 [[TMP20]], i64* [[TMP27]], align 8
21120 // CHECK17-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
21121 // CHECK17-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
21122 // CHECK17-NEXT:    store i64 [[TMP20]], i64* [[TMP29]], align 8
21123 // CHECK17-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
21124 // CHECK17-NEXT:    store i8* null, i8** [[TMP30]], align 8
21125 // CHECK17-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
21126 // CHECK17-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
21127 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
21128 // CHECK17-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
21129 // CHECK17-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
21130 // CHECK17-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
21131 // CHECK17:       omp_offload.failed11:
21132 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
21133 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
21134 // CHECK17:       omp_offload.cont12:
21135 // CHECK17-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
21136 // CHECK17-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
21137 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
21138 // CHECK17-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
21139 // CHECK17-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
21140 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
21141 // CHECK17-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
21142 // CHECK17-NEXT:    store i8* null, i8** [[TMP39]], align 8
21143 // CHECK17-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
21144 // CHECK17-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
21145 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
21146 // CHECK17-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
21147 // CHECK17-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
21148 // CHECK17-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
21149 // CHECK17:       omp_offload.failed17:
21150 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
21151 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
21152 // CHECK17:       omp_offload.cont18:
21153 // CHECK17-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
21154 // CHECK17-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
21155 // CHECK17-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
21156 // CHECK17-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
21157 // CHECK17-NEXT:    store i32 [[TMP45]], i32* [[CONV21]], align 4
21158 // CHECK17-NEXT:    [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
21159 // CHECK17-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
21160 // CHECK17-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
21161 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
21162 // CHECK17-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
21163 // CHECK17-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
21164 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
21165 // CHECK17-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
21166 // CHECK17-NEXT:    store i8* null, i8** [[TMP51]], align 8
21167 // CHECK17-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
21168 // CHECK17-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
21169 // CHECK17-NEXT:    store i64 [[TMP46]], i64* [[TMP53]], align 8
21170 // CHECK17-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
21171 // CHECK17-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
21172 // CHECK17-NEXT:    store i64 [[TMP46]], i64* [[TMP55]], align 8
21173 // CHECK17-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
21174 // CHECK17-NEXT:    store i8* null, i8** [[TMP56]], align 8
21175 // CHECK17-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
21176 // CHECK17-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
21177 // CHECK17-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
21178 // CHECK17-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
21179 // CHECK17-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
21180 // CHECK17-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
21181 // CHECK17:       omp_offload.failed26:
21182 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
21183 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT27]]
21184 // CHECK17:       omp_offload.cont27:
21185 // CHECK17-NEXT:    ret i32 0
21186 //
21187 //
21188 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
21189 // CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21190 // CHECK17-NEXT:  entry:
21191 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21192 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21193 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21194 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
21195 // CHECK17-NEXT:    ret void
21196 //
21197 //
21198 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14
21199 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21200 // CHECK17-NEXT:  entry:
21201 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21202 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21203 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21204 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21205 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21206 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21207 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21208 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21209 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21210 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21211 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21212 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21213 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21214 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21215 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21216 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
21217 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21218 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21219 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21220 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
21221 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21222 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21223 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
21224 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21225 // CHECK17:       cond.true:
21226 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21227 // CHECK17:       cond.false:
21228 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21229 // CHECK17-NEXT:    br label [[COND_END]]
21230 // CHECK17:       cond.end:
21231 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
21232 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21233 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21234 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
21235 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21236 // CHECK17:       omp.inner.for.cond:
21237 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
21238 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
21239 // CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
21240 // CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21241 // CHECK17:       omp.inner.for.body:
21242 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !46
21243 // CHECK17-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
21244 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
21245 // CHECK17-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
21246 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46
21247 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21248 // CHECK17:       omp.inner.for.inc:
21249 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
21250 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !46
21251 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
21252 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
21253 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
21254 // CHECK17:       omp.inner.for.end:
21255 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21256 // CHECK17:       omp.loop.exit:
21257 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
21258 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21259 // CHECK17-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
21260 // CHECK17-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21261 // CHECK17:       .omp.final.then:
21262 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21263 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21264 // CHECK17:       .omp.final.done:
21265 // CHECK17-NEXT:    ret void
21266 //
21267 //
21268 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15
21269 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21270 // CHECK17-NEXT:  entry:
21271 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21272 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21273 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
21274 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
21275 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21276 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21277 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21278 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21279 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21280 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21281 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21282 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21283 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21284 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21285 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21286 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21287 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21288 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21289 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21290 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21291 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21292 // CHECK17-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
21293 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21294 // CHECK17-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
21295 // CHECK17-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
21296 // CHECK17-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
21297 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21298 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21299 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21300 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
21301 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21302 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21303 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
21304 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21305 // CHECK17:       cond.true:
21306 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21307 // CHECK17:       cond.false:
21308 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21309 // CHECK17-NEXT:    br label [[COND_END]]
21310 // CHECK17:       cond.end:
21311 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
21312 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21313 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21314 // CHECK17-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
21315 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21316 // CHECK17:       omp.inner.for.cond:
21317 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
21318 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !49
21319 // CHECK17-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
21320 // CHECK17-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21321 // CHECK17:       omp.inner.for.body:
21322 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
21323 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
21324 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21325 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !49
21326 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !49
21327 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
21328 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
21329 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !49
21330 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21331 // CHECK17:       omp.body.continue:
21332 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21333 // CHECK17:       omp.inner.for.inc:
21334 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
21335 // CHECK17-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
21336 // CHECK17-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
21337 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
21338 // CHECK17:       omp.inner.for.end:
21339 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21340 // CHECK17:       omp.loop.exit:
21341 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
21342 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21343 // CHECK17-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
21344 // CHECK17-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21345 // CHECK17:       .omp.final.then:
21346 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21347 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21348 // CHECK17:       .omp.final.done:
21349 // CHECK17-NEXT:    ret void
21350 //
21351 //
21352 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
21353 // CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21354 // CHECK17-NEXT:  entry:
21355 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21356 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21357 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21358 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
21359 // CHECK17-NEXT:    ret void
21360 //
21361 //
21362 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..17
21363 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21364 // CHECK17-NEXT:  entry:
21365 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21366 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21367 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21368 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21369 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21370 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21371 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21372 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21373 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21374 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21375 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21376 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21377 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21378 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21379 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21380 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
21381 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21382 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21383 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21384 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
21385 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21386 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21387 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
21388 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21389 // CHECK17:       cond.true:
21390 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21391 // CHECK17:       cond.false:
21392 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21393 // CHECK17-NEXT:    br label [[COND_END]]
21394 // CHECK17:       cond.end:
21395 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
21396 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21397 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21398 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
21399 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21400 // CHECK17:       omp.inner.for.cond:
21401 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
21402 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
21403 // CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
21404 // CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21405 // CHECK17:       omp.inner.for.body:
21406 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !52
21407 // CHECK17-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
21408 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
21409 // CHECK17-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
21410 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52
21411 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21412 // CHECK17:       omp.inner.for.inc:
21413 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
21414 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !52
21415 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
21416 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
21417 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
21418 // CHECK17:       omp.inner.for.end:
21419 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21420 // CHECK17:       omp.loop.exit:
21421 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
21422 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21423 // CHECK17-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
21424 // CHECK17-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21425 // CHECK17:       .omp.final.then:
21426 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21427 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21428 // CHECK17:       .omp.final.done:
21429 // CHECK17-NEXT:    ret void
21430 //
21431 //
21432 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18
21433 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21434 // CHECK17-NEXT:  entry:
21435 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21436 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21437 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
21438 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
21439 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21440 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21441 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21442 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21443 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21444 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21445 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21446 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21447 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21448 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21449 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21450 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21451 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21452 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21453 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21454 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21455 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21456 // CHECK17-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
21457 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21458 // CHECK17-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
21459 // CHECK17-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
21460 // CHECK17-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
21461 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21462 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21463 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21464 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
21465 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21466 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21467 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
21468 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21469 // CHECK17:       cond.true:
21470 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21471 // CHECK17:       cond.false:
21472 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21473 // CHECK17-NEXT:    br label [[COND_END]]
21474 // CHECK17:       cond.end:
21475 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
21476 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21477 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21478 // CHECK17-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
21479 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21480 // CHECK17:       omp.inner.for.cond:
21481 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
21482 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !55
21483 // CHECK17-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
21484 // CHECK17-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21485 // CHECK17:       omp.inner.for.body:
21486 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
21487 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
21488 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21489 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !55
21490 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !55
21491 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
21492 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
21493 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !55
21494 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21495 // CHECK17:       omp.body.continue:
21496 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21497 // CHECK17:       omp.inner.for.inc:
21498 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
21499 // CHECK17-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
21500 // CHECK17-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
21501 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
21502 // CHECK17:       omp.inner.for.end:
21503 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21504 // CHECK17:       omp.loop.exit:
21505 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
21506 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21507 // CHECK17-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
21508 // CHECK17-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21509 // CHECK17:       .omp.final.then:
21510 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21511 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21512 // CHECK17:       .omp.final.done:
21513 // CHECK17-NEXT:    ret void
21514 //
21515 //
21516 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
21517 // CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
21518 // CHECK17-NEXT:  entry:
21519 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21520 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
21521 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
21522 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21523 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
21524 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21525 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
21526 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
21527 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
21528 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
21529 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
21530 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
21531 // CHECK17-NEXT:    ret void
21532 //
21533 //
21534 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..21
21535 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
21536 // CHECK17-NEXT:  entry:
21537 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21538 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21539 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21540 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
21541 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21542 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21543 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21544 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21545 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21546 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21547 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21548 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
21549 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21550 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21551 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21552 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
21553 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21554 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
21555 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21556 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
21557 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21558 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21559 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21560 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
21561 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21562 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21563 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
21564 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21565 // CHECK17:       cond.true:
21566 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21567 // CHECK17:       cond.false:
21568 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21569 // CHECK17-NEXT:    br label [[COND_END]]
21570 // CHECK17:       cond.end:
21571 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
21572 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21573 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21574 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
21575 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21576 // CHECK17:       omp.inner.for.cond:
21577 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
21578 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
21579 // CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
21580 // CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21581 // CHECK17:       omp.inner.for.body:
21582 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !58
21583 // CHECK17-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
21584 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
21585 // CHECK17-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
21586 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !58
21587 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
21588 // CHECK17-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58
21589 // CHECK17-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58
21590 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58
21591 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21592 // CHECK17:       omp.inner.for.inc:
21593 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
21594 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !58
21595 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
21596 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
21597 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
21598 // CHECK17:       omp.inner.for.end:
21599 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21600 // CHECK17:       omp.loop.exit:
21601 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
21602 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21603 // CHECK17-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
21604 // CHECK17-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21605 // CHECK17:       .omp.final.then:
21606 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21607 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21608 // CHECK17:       .omp.final.done:
21609 // CHECK17-NEXT:    ret void
21610 //
21611 //
21612 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22
21613 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
21614 // CHECK17-NEXT:  entry:
21615 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21616 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21617 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
21618 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
21619 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21620 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
21621 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21622 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21623 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21624 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21625 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21626 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21627 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21628 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21629 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21630 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21631 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21632 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21633 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
21634 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21635 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
21636 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21637 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21638 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21639 // CHECK17-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
21640 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21641 // CHECK17-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
21642 // CHECK17-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
21643 // CHECK17-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
21644 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21645 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21646 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
21647 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21648 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
21649 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
21650 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
21651 // CHECK17:       omp.dispatch.cond:
21652 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21653 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21654 // CHECK17-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
21655 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
21656 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21657 // CHECK17:       cond.true:
21658 // CHECK17-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21659 // CHECK17-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
21660 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21661 // CHECK17:       cond.false:
21662 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21663 // CHECK17-NEXT:    br label [[COND_END]]
21664 // CHECK17:       cond.end:
21665 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
21666 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21667 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21668 // CHECK17-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
21669 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21670 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21671 // CHECK17-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
21672 // CHECK17-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
21673 // CHECK17:       omp.dispatch.body:
21674 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21675 // CHECK17:       omp.inner.for.cond:
21676 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
21677 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !61
21678 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
21679 // CHECK17-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21680 // CHECK17:       omp.inner.for.body:
21681 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
21682 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
21683 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21684 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !61
21685 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !61
21686 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
21687 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
21688 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !61
21689 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21690 // CHECK17:       omp.body.continue:
21691 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21692 // CHECK17:       omp.inner.for.inc:
21693 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
21694 // CHECK17-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
21695 // CHECK17-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
21696 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
21697 // CHECK17:       omp.inner.for.end:
21698 // CHECK17-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
21699 // CHECK17:       omp.dispatch.inc:
21700 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21701 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21702 // CHECK17-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
21703 // CHECK17-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
21704 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21705 // CHECK17-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21706 // CHECK17-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
21707 // CHECK17-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
21708 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND]]
21709 // CHECK17:       omp.dispatch.end:
21710 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
21711 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21712 // CHECK17-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
21713 // CHECK17-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21714 // CHECK17:       .omp.final.then:
21715 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21716 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21717 // CHECK17:       .omp.final.done:
21718 // CHECK17-NEXT:    ret void
21719 //
21720 //
21721 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
21722 // CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21723 // CHECK17-NEXT:  entry:
21724 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21725 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21726 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21727 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
21728 // CHECK17-NEXT:    ret void
21729 //
21730 //
21731 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..25
21732 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21733 // CHECK17-NEXT:  entry:
21734 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21735 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21736 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21737 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21738 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21739 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21740 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21741 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21742 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21743 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21744 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21745 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21746 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21747 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21748 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21749 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
21750 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21751 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21752 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21753 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
21754 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21755 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21756 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
21757 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21758 // CHECK17:       cond.true:
21759 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21760 // CHECK17:       cond.false:
21761 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21762 // CHECK17-NEXT:    br label [[COND_END]]
21763 // CHECK17:       cond.end:
21764 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
21765 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21766 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21767 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
21768 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21769 // CHECK17:       omp.inner.for.cond:
21770 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
21771 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
21772 // CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
21773 // CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21774 // CHECK17:       omp.inner.for.body:
21775 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !64
21776 // CHECK17-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
21777 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
21778 // CHECK17-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
21779 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64
21780 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21781 // CHECK17:       omp.inner.for.inc:
21782 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
21783 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !64
21784 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
21785 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
21786 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
21787 // CHECK17:       omp.inner.for.end:
21788 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21789 // CHECK17:       omp.loop.exit:
21790 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
21791 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21792 // CHECK17-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
21793 // CHECK17-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21794 // CHECK17:       .omp.final.then:
21795 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21796 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21797 // CHECK17:       .omp.final.done:
21798 // CHECK17-NEXT:    ret void
21799 //
21800 //
21801 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26
21802 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
21803 // CHECK17-NEXT:  entry:
21804 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21805 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21806 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
21807 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
21808 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21809 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21810 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21811 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21812 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21813 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21814 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21815 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21816 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21817 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21818 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21819 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21820 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21821 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21822 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21823 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21824 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21825 // CHECK17-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
21826 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21827 // CHECK17-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
21828 // CHECK17-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
21829 // CHECK17-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
21830 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21831 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21832 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21833 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21834 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21835 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
21836 // CHECK17-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
21837 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
21838 // CHECK17:       omp.dispatch.cond:
21839 // CHECK17-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
21840 // CHECK17-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
21841 // CHECK17-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
21842 // CHECK17:       omp.dispatch.body:
21843 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21844 // CHECK17-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
21845 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21846 // CHECK17:       omp.inner.for.cond:
21847 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
21848 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !67
21849 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
21850 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21851 // CHECK17:       omp.inner.for.body:
21852 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
21853 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
21854 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21855 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !67
21856 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !67
21857 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
21858 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
21859 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !67
21860 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21861 // CHECK17:       omp.body.continue:
21862 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21863 // CHECK17:       omp.inner.for.inc:
21864 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
21865 // CHECK17-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
21866 // CHECK17-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
21867 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
21868 // CHECK17:       omp.inner.for.end:
21869 // CHECK17-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
21870 // CHECK17:       omp.dispatch.inc:
21871 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND]]
21872 // CHECK17:       omp.dispatch.end:
21873 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21874 // CHECK17-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
21875 // CHECK17-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21876 // CHECK17:       .omp.final.then:
21877 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21878 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21879 // CHECK17:       .omp.final.done:
21880 // CHECK17-NEXT:    ret void
21881 //
21882 //
21883 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
21884 // CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
21885 // CHECK17-NEXT:  entry:
21886 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21887 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
21888 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
21889 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21890 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
21891 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21892 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
21893 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
21894 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
21895 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
21896 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
21897 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
21898 // CHECK17-NEXT:    ret void
21899 //
21900 //
21901 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..29
21902 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
21903 // CHECK17-NEXT:  entry:
21904 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21905 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21906 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21907 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
21908 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21909 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21910 // CHECK17-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21911 // CHECK17-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21912 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21913 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21914 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21915 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
21916 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21917 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21918 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
21919 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
21920 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
21921 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
21922 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21923 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
21924 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21925 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21926 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21927 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
21928 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21929 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21930 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
21931 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21932 // CHECK17:       cond.true:
21933 // CHECK17-NEXT:    br label [[COND_END:%.*]]
21934 // CHECK17:       cond.false:
21935 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21936 // CHECK17-NEXT:    br label [[COND_END]]
21937 // CHECK17:       cond.end:
21938 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
21939 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21940 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21941 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
21942 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21943 // CHECK17:       omp.inner.for.cond:
21944 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
21945 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
21946 // CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
21947 // CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21948 // CHECK17:       omp.inner.for.body:
21949 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !70
21950 // CHECK17-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
21951 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
21952 // CHECK17-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
21953 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !70
21954 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
21955 // CHECK17-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70
21956 // CHECK17-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70
21957 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70
21958 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21959 // CHECK17:       omp.inner.for.inc:
21960 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
21961 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !70
21962 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
21963 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
21964 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
21965 // CHECK17:       omp.inner.for.end:
21966 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21967 // CHECK17:       omp.loop.exit:
21968 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
21969 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21970 // CHECK17-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
21971 // CHECK17-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21972 // CHECK17:       .omp.final.then:
21973 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
21974 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21975 // CHECK17:       .omp.final.done:
21976 // CHECK17-NEXT:    ret void
21977 //
21978 //
21979 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30
21980 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
21981 // CHECK17-NEXT:  entry:
21982 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21983 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21984 // CHECK17-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
21985 // CHECK17-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
21986 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
21987 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
21988 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21989 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21990 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21991 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21992 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21993 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21994 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
21995 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21996 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21997 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21998 // CHECK17-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21999 // CHECK17-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
22000 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
22001 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
22002 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
22003 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22004 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22005 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
22006 // CHECK17-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
22007 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
22008 // CHECK17-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
22009 // CHECK17-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
22010 // CHECK17-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
22011 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22012 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22013 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
22014 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22015 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22016 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22017 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
22018 // CHECK17-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
22019 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22020 // CHECK17:       omp.dispatch.cond:
22021 // CHECK17-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
22022 // CHECK17-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
22023 // CHECK17-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22024 // CHECK17:       omp.dispatch.body:
22025 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22026 // CHECK17-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
22027 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22028 // CHECK17:       omp.inner.for.cond:
22029 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
22030 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !73
22031 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
22032 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22033 // CHECK17:       omp.inner.for.body:
22034 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
22035 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
22036 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22037 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !73
22038 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !73
22039 // CHECK17-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
22040 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
22041 // CHECK17-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !73
22042 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22043 // CHECK17:       omp.body.continue:
22044 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22045 // CHECK17:       omp.inner.for.inc:
22046 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
22047 // CHECK17-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
22048 // CHECK17-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
22049 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
22050 // CHECK17:       omp.inner.for.end:
22051 // CHECK17-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22052 // CHECK17:       omp.dispatch.inc:
22053 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND]]
22054 // CHECK17:       omp.dispatch.end:
22055 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22056 // CHECK17-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
22057 // CHECK17-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22058 // CHECK17:       .omp.final.then:
22059 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
22060 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22061 // CHECK17:       .omp.final.done:
22062 // CHECK17-NEXT:    ret void
22063 //
22064 //
22065 // CHECK17-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
22066 // CHECK17-SAME: () #[[ATTR5:[0-9]+]] {
22067 // CHECK17-NEXT:  entry:
22068 // CHECK17-NEXT:    call void @__tgt_register_requires(i64 1)
22069 // CHECK17-NEXT:    ret void
22070 //
22071 //
22072 // CHECK18-LABEL: define {{[^@]+}}@main
22073 // CHECK18-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
22074 // CHECK18-NEXT:  entry:
22075 // CHECK18-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
22076 // CHECK18-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
22077 // CHECK18-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
22078 // CHECK18-NEXT:    [[N:%.*]] = alloca i32, align 4
22079 // CHECK18-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
22080 // CHECK18-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
22081 // CHECK18-NEXT:    [[M:%.*]] = alloca i32, align 4
22082 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22083 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
22084 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
22085 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
22086 // CHECK18-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
22087 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22088 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22089 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22090 // CHECK18-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
22091 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
22092 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
22093 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
22094 // CHECK18-NEXT:    [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
22095 // CHECK18-NEXT:    [[_TMP9:%.*]] = alloca i32, align 4
22096 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
22097 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
22098 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
22099 // CHECK18-NEXT:    [[N_CASTED19:%.*]] = alloca i64, align 8
22100 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
22101 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
22102 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
22103 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
22104 // CHECK18-NEXT:    [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
22105 // CHECK18-NEXT:    [[_TMP26:%.*]] = alloca i32, align 4
22106 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
22107 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
22108 // CHECK18-NEXT:    [[N_CASTED35:%.*]] = alloca i64, align 8
22109 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
22110 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
22111 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
22112 // CHECK18-NEXT:    [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
22113 // CHECK18-NEXT:    [[_TMP41:%.*]] = alloca i32, align 4
22114 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
22115 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
22116 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
22117 // CHECK18-NEXT:    [[N_CASTED51:%.*]] = alloca i64, align 8
22118 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
22119 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
22120 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
22121 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
22122 // CHECK18-NEXT:    [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
22123 // CHECK18-NEXT:    [[_TMP59:%.*]] = alloca i32, align 4
22124 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
22125 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
22126 // CHECK18-NEXT:    store i32 0, i32* [[RETVAL]], align 4
22127 // CHECK18-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
22128 // CHECK18-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
22129 // CHECK18-NEXT:    store i32 100, i32* [[N]], align 4
22130 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
22131 // CHECK18-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
22132 // CHECK18-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
22133 // CHECK18-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
22134 // CHECK18-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
22135 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
22136 // CHECK18-NEXT:    store i32 10, i32* [[M]], align 4
22137 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N]], align 4
22138 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
22139 // CHECK18-NEXT:    store i32 [[TMP3]], i32* [[CONV]], align 4
22140 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
22141 // CHECK18-NEXT:    [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
22142 // CHECK18-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
22143 // CHECK18-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
22144 // CHECK18-NEXT:    store i64 [[TMP4]], i64* [[TMP7]], align 8
22145 // CHECK18-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
22146 // CHECK18-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
22147 // CHECK18-NEXT:    store i64 [[TMP4]], i64* [[TMP9]], align 8
22148 // CHECK18-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
22149 // CHECK18-NEXT:    store i64 4, i64* [[TMP10]], align 8
22150 // CHECK18-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
22151 // CHECK18-NEXT:    store i8* null, i8** [[TMP11]], align 8
22152 // CHECK18-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
22153 // CHECK18-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
22154 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP13]], align 8
22155 // CHECK18-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
22156 // CHECK18-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
22157 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP15]], align 8
22158 // CHECK18-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
22159 // CHECK18-NEXT:    store i64 8, i64* [[TMP16]], align 8
22160 // CHECK18-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
22161 // CHECK18-NEXT:    store i8* null, i8** [[TMP17]], align 8
22162 // CHECK18-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
22163 // CHECK18-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
22164 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 8
22165 // CHECK18-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
22166 // CHECK18-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
22167 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 8
22168 // CHECK18-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
22169 // CHECK18-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
22170 // CHECK18-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
22171 // CHECK18-NEXT:    store i8* null, i8** [[TMP23]], align 8
22172 // CHECK18-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
22173 // CHECK18-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
22174 // CHECK18-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
22175 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
22176 // CHECK18-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
22177 // CHECK18-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22178 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
22179 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22180 // CHECK18-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22181 // CHECK18-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22182 // CHECK18-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22183 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
22184 // CHECK18-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
22185 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
22186 // CHECK18-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
22187 // CHECK18-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
22188 // CHECK18-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
22189 // CHECK18:       omp_offload.failed:
22190 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
22191 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT]]
22192 // CHECK18:       omp_offload.cont:
22193 // CHECK18-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
22194 // CHECK18-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
22195 // CHECK18-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
22196 // CHECK18-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
22197 // CHECK18-NEXT:    [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
22198 // CHECK18-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
22199 // CHECK18-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
22200 // CHECK18-NEXT:    store i64 [[TMP34]], i64* [[TMP37]], align 8
22201 // CHECK18-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
22202 // CHECK18-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
22203 // CHECK18-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
22204 // CHECK18-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
22205 // CHECK18-NEXT:    store i64 4, i64* [[TMP40]], align 8
22206 // CHECK18-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
22207 // CHECK18-NEXT:    store i8* null, i8** [[TMP41]], align 8
22208 // CHECK18-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
22209 // CHECK18-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
22210 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP43]], align 8
22211 // CHECK18-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
22212 // CHECK18-NEXT:    [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
22213 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP45]], align 8
22214 // CHECK18-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
22215 // CHECK18-NEXT:    store i64 8, i64* [[TMP46]], align 8
22216 // CHECK18-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
22217 // CHECK18-NEXT:    store i8* null, i8** [[TMP47]], align 8
22218 // CHECK18-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
22219 // CHECK18-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
22220 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP49]], align 8
22221 // CHECK18-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
22222 // CHECK18-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
22223 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP51]], align 8
22224 // CHECK18-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
22225 // CHECK18-NEXT:    store i64 [[TMP35]], i64* [[TMP52]], align 8
22226 // CHECK18-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
22227 // CHECK18-NEXT:    store i8* null, i8** [[TMP53]], align 8
22228 // CHECK18-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
22229 // CHECK18-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
22230 // CHECK18-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
22231 // CHECK18-NEXT:    [[TMP57:%.*]] = load i32, i32* [[N]], align 4
22232 // CHECK18-NEXT:    store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
22233 // CHECK18-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
22234 // CHECK18-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
22235 // CHECK18-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
22236 // CHECK18-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
22237 // CHECK18-NEXT:    store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
22238 // CHECK18-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
22239 // CHECK18-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
22240 // CHECK18-NEXT:    [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
22241 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
22242 // CHECK18-NEXT:    [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
22243 // CHECK18-NEXT:    [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
22244 // CHECK18-NEXT:    br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
22245 // CHECK18:       omp_offload.failed16:
22246 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
22247 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
22248 // CHECK18:       omp_offload.cont17:
22249 // CHECK18-NEXT:    [[TMP63:%.*]] = load i32, i32* [[M]], align 4
22250 // CHECK18-NEXT:    store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
22251 // CHECK18-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N]], align 4
22252 // CHECK18-NEXT:    [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
22253 // CHECK18-NEXT:    store i32 [[TMP64]], i32* [[CONV20]], align 4
22254 // CHECK18-NEXT:    [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
22255 // CHECK18-NEXT:    [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
22256 // CHECK18-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
22257 // CHECK18-NEXT:    store i32 [[TMP66]], i32* [[CONV21]], align 4
22258 // CHECK18-NEXT:    [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
22259 // CHECK18-NEXT:    [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
22260 // CHECK18-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
22261 // CHECK18-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
22262 // CHECK18-NEXT:    store i64 [[TMP65]], i64* [[TMP70]], align 8
22263 // CHECK18-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
22264 // CHECK18-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
22265 // CHECK18-NEXT:    store i64 [[TMP65]], i64* [[TMP72]], align 8
22266 // CHECK18-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
22267 // CHECK18-NEXT:    store i64 4, i64* [[TMP73]], align 8
22268 // CHECK18-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
22269 // CHECK18-NEXT:    store i8* null, i8** [[TMP74]], align 8
22270 // CHECK18-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
22271 // CHECK18-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
22272 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP76]], align 8
22273 // CHECK18-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
22274 // CHECK18-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
22275 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP78]], align 8
22276 // CHECK18-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
22277 // CHECK18-NEXT:    store i64 8, i64* [[TMP79]], align 8
22278 // CHECK18-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
22279 // CHECK18-NEXT:    store i8* null, i8** [[TMP80]], align 8
22280 // CHECK18-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
22281 // CHECK18-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
22282 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP82]], align 8
22283 // CHECK18-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
22284 // CHECK18-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
22285 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 8
22286 // CHECK18-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
22287 // CHECK18-NEXT:    store i64 [[TMP68]], i64* [[TMP85]], align 8
22288 // CHECK18-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
22289 // CHECK18-NEXT:    store i8* null, i8** [[TMP86]], align 8
22290 // CHECK18-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
22291 // CHECK18-NEXT:    [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
22292 // CHECK18-NEXT:    store i64 [[TMP67]], i64* [[TMP88]], align 8
22293 // CHECK18-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
22294 // CHECK18-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
22295 // CHECK18-NEXT:    store i64 [[TMP67]], i64* [[TMP90]], align 8
22296 // CHECK18-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
22297 // CHECK18-NEXT:    store i64 4, i64* [[TMP91]], align 8
22298 // CHECK18-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
22299 // CHECK18-NEXT:    store i8* null, i8** [[TMP92]], align 8
22300 // CHECK18-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
22301 // CHECK18-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
22302 // CHECK18-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
22303 // CHECK18-NEXT:    [[TMP96:%.*]] = load i32, i32* [[N]], align 4
22304 // CHECK18-NEXT:    store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
22305 // CHECK18-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
22306 // CHECK18-NEXT:    [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
22307 // CHECK18-NEXT:    [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
22308 // CHECK18-NEXT:    [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
22309 // CHECK18-NEXT:    store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
22310 // CHECK18-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
22311 // CHECK18-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
22312 // CHECK18-NEXT:    [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
22313 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
22314 // CHECK18-NEXT:    [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
22315 // CHECK18-NEXT:    [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
22316 // CHECK18-NEXT:    br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
22317 // CHECK18:       omp_offload.failed33:
22318 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
22319 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT34]]
22320 // CHECK18:       omp_offload.cont34:
22321 // CHECK18-NEXT:    [[TMP102:%.*]] = load i32, i32* [[N]], align 4
22322 // CHECK18-NEXT:    [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
22323 // CHECK18-NEXT:    store i32 [[TMP102]], i32* [[CONV36]], align 4
22324 // CHECK18-NEXT:    [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
22325 // CHECK18-NEXT:    [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
22326 // CHECK18-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
22327 // CHECK18-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
22328 // CHECK18-NEXT:    store i64 [[TMP103]], i64* [[TMP106]], align 8
22329 // CHECK18-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
22330 // CHECK18-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
22331 // CHECK18-NEXT:    store i64 [[TMP103]], i64* [[TMP108]], align 8
22332 // CHECK18-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
22333 // CHECK18-NEXT:    store i64 4, i64* [[TMP109]], align 8
22334 // CHECK18-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
22335 // CHECK18-NEXT:    store i8* null, i8** [[TMP110]], align 8
22336 // CHECK18-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
22337 // CHECK18-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
22338 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP112]], align 8
22339 // CHECK18-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
22340 // CHECK18-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
22341 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP114]], align 8
22342 // CHECK18-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
22343 // CHECK18-NEXT:    store i64 8, i64* [[TMP115]], align 8
22344 // CHECK18-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
22345 // CHECK18-NEXT:    store i8* null, i8** [[TMP116]], align 8
22346 // CHECK18-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
22347 // CHECK18-NEXT:    [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
22348 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP118]], align 8
22349 // CHECK18-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
22350 // CHECK18-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
22351 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP120]], align 8
22352 // CHECK18-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
22353 // CHECK18-NEXT:    store i64 [[TMP104]], i64* [[TMP121]], align 8
22354 // CHECK18-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
22355 // CHECK18-NEXT:    store i8* null, i8** [[TMP122]], align 8
22356 // CHECK18-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
22357 // CHECK18-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
22358 // CHECK18-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
22359 // CHECK18-NEXT:    [[TMP126:%.*]] = load i32, i32* [[N]], align 4
22360 // CHECK18-NEXT:    store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
22361 // CHECK18-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
22362 // CHECK18-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
22363 // CHECK18-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
22364 // CHECK18-NEXT:    [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
22365 // CHECK18-NEXT:    store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
22366 // CHECK18-NEXT:    [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
22367 // CHECK18-NEXT:    [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
22368 // CHECK18-NEXT:    [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
22369 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
22370 // CHECK18-NEXT:    [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
22371 // CHECK18-NEXT:    [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
22372 // CHECK18-NEXT:    br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
22373 // CHECK18:       omp_offload.failed48:
22374 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
22375 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT49]]
22376 // CHECK18:       omp_offload.cont49:
22377 // CHECK18-NEXT:    [[TMP132:%.*]] = load i32, i32* [[M]], align 4
22378 // CHECK18-NEXT:    store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
22379 // CHECK18-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
22380 // CHECK18-NEXT:    [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
22381 // CHECK18-NEXT:    store i32 [[TMP133]], i32* [[CONV52]], align 4
22382 // CHECK18-NEXT:    [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
22383 // CHECK18-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
22384 // CHECK18-NEXT:    [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
22385 // CHECK18-NEXT:    store i32 [[TMP135]], i32* [[CONV54]], align 4
22386 // CHECK18-NEXT:    [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
22387 // CHECK18-NEXT:    [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
22388 // CHECK18-NEXT:    [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
22389 // CHECK18-NEXT:    [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
22390 // CHECK18-NEXT:    store i64 [[TMP134]], i64* [[TMP139]], align 8
22391 // CHECK18-NEXT:    [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
22392 // CHECK18-NEXT:    [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
22393 // CHECK18-NEXT:    store i64 [[TMP134]], i64* [[TMP141]], align 8
22394 // CHECK18-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
22395 // CHECK18-NEXT:    store i64 4, i64* [[TMP142]], align 8
22396 // CHECK18-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
22397 // CHECK18-NEXT:    store i8* null, i8** [[TMP143]], align 8
22398 // CHECK18-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
22399 // CHECK18-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
22400 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP145]], align 8
22401 // CHECK18-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
22402 // CHECK18-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
22403 // CHECK18-NEXT:    store i64 [[TMP1]], i64* [[TMP147]], align 8
22404 // CHECK18-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
22405 // CHECK18-NEXT:    store i64 8, i64* [[TMP148]], align 8
22406 // CHECK18-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
22407 // CHECK18-NEXT:    store i8* null, i8** [[TMP149]], align 8
22408 // CHECK18-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
22409 // CHECK18-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
22410 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP151]], align 8
22411 // CHECK18-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
22412 // CHECK18-NEXT:    [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
22413 // CHECK18-NEXT:    store i32* [[VLA]], i32** [[TMP153]], align 8
22414 // CHECK18-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
22415 // CHECK18-NEXT:    store i64 [[TMP137]], i64* [[TMP154]], align 8
22416 // CHECK18-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
22417 // CHECK18-NEXT:    store i8* null, i8** [[TMP155]], align 8
22418 // CHECK18-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
22419 // CHECK18-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
22420 // CHECK18-NEXT:    store i64 [[TMP136]], i64* [[TMP157]], align 8
22421 // CHECK18-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
22422 // CHECK18-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
22423 // CHECK18-NEXT:    store i64 [[TMP136]], i64* [[TMP159]], align 8
22424 // CHECK18-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
22425 // CHECK18-NEXT:    store i64 4, i64* [[TMP160]], align 8
22426 // CHECK18-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
22427 // CHECK18-NEXT:    store i8* null, i8** [[TMP161]], align 8
22428 // CHECK18-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
22429 // CHECK18-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
22430 // CHECK18-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
22431 // CHECK18-NEXT:    [[TMP165:%.*]] = load i32, i32* [[N]], align 4
22432 // CHECK18-NEXT:    store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
22433 // CHECK18-NEXT:    [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
22434 // CHECK18-NEXT:    [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
22435 // CHECK18-NEXT:    [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
22436 // CHECK18-NEXT:    [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
22437 // CHECK18-NEXT:    store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
22438 // CHECK18-NEXT:    [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
22439 // CHECK18-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
22440 // CHECK18-NEXT:    [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
22441 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
22442 // CHECK18-NEXT:    [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
22443 // CHECK18-NEXT:    [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
22444 // CHECK18-NEXT:    br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
22445 // CHECK18:       omp_offload.failed66:
22446 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
22447 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT67]]
22448 // CHECK18:       omp_offload.cont67:
22449 // CHECK18-NEXT:    [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
22450 // CHECK18-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
22451 // CHECK18-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
22452 // CHECK18-NEXT:    [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
22453 // CHECK18-NEXT:    call void @llvm.stackrestore(i8* [[TMP172]])
22454 // CHECK18-NEXT:    [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
22455 // CHECK18-NEXT:    ret i32 [[TMP173]]
22456 //
22457 //
22458 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
22459 // CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
22460 // CHECK18-NEXT:  entry:
22461 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22462 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22463 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22464 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22465 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22466 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22467 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
22468 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22469 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22470 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
22471 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
22472 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
22473 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
22474 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
22475 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
22476 // CHECK18-NEXT:    ret void
22477 //
22478 //
22479 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined.
22480 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
22481 // CHECK18-NEXT:  entry:
22482 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22483 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22484 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22485 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22486 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22487 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22488 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22489 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22490 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22491 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
22492 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22493 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22494 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22495 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22496 // CHECK18-NEXT:    [[I3:%.*]] = alloca i32, align 4
22497 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22498 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22499 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22500 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22501 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22502 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
22503 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22504 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22505 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
22506 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
22507 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
22508 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22509 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
22510 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22511 // CHECK18-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22512 // CHECK18-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22513 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
22514 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22515 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
22516 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22517 // CHECK18:       omp.precond.then:
22518 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22519 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22520 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
22521 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22522 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22523 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22524 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
22525 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22526 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22527 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22528 // CHECK18-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
22529 // CHECK18-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22530 // CHECK18:       cond.true:
22531 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22532 // CHECK18-NEXT:    br label [[COND_END:%.*]]
22533 // CHECK18:       cond.false:
22534 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22535 // CHECK18-NEXT:    br label [[COND_END]]
22536 // CHECK18:       cond.end:
22537 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
22538 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22539 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22540 // CHECK18-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
22541 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22542 // CHECK18:       omp.inner.for.cond:
22543 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
22544 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
22545 // CHECK18-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
22546 // CHECK18-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22547 // CHECK18:       omp.inner.for.body:
22548 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !13
22549 // CHECK18-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
22550 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !13
22551 // CHECK18-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
22552 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !13
22553 // CHECK18-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
22554 // CHECK18-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !13
22555 // CHECK18-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !13
22556 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !13
22557 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22558 // CHECK18:       omp.inner.for.inc:
22559 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
22560 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !13
22561 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
22562 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
22563 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
22564 // CHECK18:       omp.inner.for.end:
22565 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22566 // CHECK18:       omp.loop.exit:
22567 // CHECK18-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22568 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
22569 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
22570 // CHECK18-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22571 // CHECK18-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
22572 // CHECK18-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22573 // CHECK18:       .omp.final.then:
22574 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22575 // CHECK18-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
22576 // CHECK18-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
22577 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
22578 // CHECK18-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
22579 // CHECK18-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
22580 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22581 // CHECK18:       .omp.final.done:
22582 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
22583 // CHECK18:       omp.precond.end:
22584 // CHECK18-NEXT:    ret void
22585 //
22586 //
22587 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..1
22588 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
22589 // CHECK18-NEXT:  entry:
22590 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22591 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22592 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
22593 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
22594 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22595 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22596 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22597 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22598 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22599 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22600 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22601 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
22602 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22603 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22604 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22605 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22606 // CHECK18-NEXT:    [[I5:%.*]] = alloca i32, align 4
22607 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22608 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22609 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
22610 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
22611 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22612 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22613 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
22614 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22615 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22616 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
22617 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
22618 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
22619 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22620 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
22621 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22622 // CHECK18-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22623 // CHECK18-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22624 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
22625 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22626 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
22627 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22628 // CHECK18:       omp.precond.then:
22629 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22630 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22631 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
22632 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
22633 // CHECK18-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
22634 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
22635 // CHECK18-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
22636 // CHECK18-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
22637 // CHECK18-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
22638 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22639 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22640 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22641 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
22642 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22643 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22644 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22645 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
22646 // CHECK18-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22647 // CHECK18:       cond.true:
22648 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22649 // CHECK18-NEXT:    br label [[COND_END:%.*]]
22650 // CHECK18:       cond.false:
22651 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22652 // CHECK18-NEXT:    br label [[COND_END]]
22653 // CHECK18:       cond.end:
22654 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
22655 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22656 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22657 // CHECK18-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
22658 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22659 // CHECK18:       omp.inner.for.cond:
22660 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
22661 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !17
22662 // CHECK18-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
22663 // CHECK18-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22664 // CHECK18:       omp.inner.for.body:
22665 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
22666 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
22667 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22668 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !17
22669 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !17
22670 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
22671 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
22672 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !17
22673 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22674 // CHECK18:       omp.body.continue:
22675 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22676 // CHECK18:       omp.inner.for.inc:
22677 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
22678 // CHECK18-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
22679 // CHECK18-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
22680 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
22681 // CHECK18:       omp.inner.for.end:
22682 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22683 // CHECK18:       omp.loop.exit:
22684 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22685 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
22686 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
22687 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22688 // CHECK18-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
22689 // CHECK18-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22690 // CHECK18:       .omp.final.then:
22691 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22692 // CHECK18-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
22693 // CHECK18-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
22694 // CHECK18-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
22695 // CHECK18-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
22696 // CHECK18-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
22697 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22698 // CHECK18:       .omp.final.done:
22699 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
22700 // CHECK18:       omp.precond.end:
22701 // CHECK18-NEXT:    ret void
22702 //
22703 //
22704 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
22705 // CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
22706 // CHECK18-NEXT:  entry:
22707 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22708 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22709 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22710 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22711 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22712 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22713 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
22714 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22715 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22716 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
22717 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
22718 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
22719 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
22720 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
22721 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
22722 // CHECK18-NEXT:    ret void
22723 //
22724 //
22725 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..2
22726 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
22727 // CHECK18-NEXT:  entry:
22728 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22729 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22730 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22731 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22732 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22733 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22734 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22735 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22736 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22737 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
22738 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22739 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22740 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22741 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22742 // CHECK18-NEXT:    [[I3:%.*]] = alloca i32, align 4
22743 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22744 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22745 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22746 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22747 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22748 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
22749 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22750 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22751 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
22752 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
22753 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
22754 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22755 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
22756 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22757 // CHECK18-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22758 // CHECK18-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22759 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
22760 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22761 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
22762 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22763 // CHECK18:       omp.precond.then:
22764 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22765 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22766 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
22767 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22768 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22769 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22770 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
22771 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22772 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22773 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22774 // CHECK18-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
22775 // CHECK18-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22776 // CHECK18:       cond.true:
22777 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22778 // CHECK18-NEXT:    br label [[COND_END:%.*]]
22779 // CHECK18:       cond.false:
22780 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22781 // CHECK18-NEXT:    br label [[COND_END]]
22782 // CHECK18:       cond.end:
22783 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
22784 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22785 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22786 // CHECK18-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
22787 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22788 // CHECK18:       omp.inner.for.cond:
22789 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
22790 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
22791 // CHECK18-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
22792 // CHECK18-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22793 // CHECK18:       omp.inner.for.body:
22794 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !22
22795 // CHECK18-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
22796 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !22
22797 // CHECK18-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
22798 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !22
22799 // CHECK18-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
22800 // CHECK18-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !22
22801 // CHECK18-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !22
22802 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !22
22803 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22804 // CHECK18:       omp.inner.for.inc:
22805 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
22806 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !22
22807 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
22808 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
22809 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
22810 // CHECK18:       omp.inner.for.end:
22811 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22812 // CHECK18:       omp.loop.exit:
22813 // CHECK18-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22814 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
22815 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
22816 // CHECK18-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22817 // CHECK18-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
22818 // CHECK18-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22819 // CHECK18:       .omp.final.then:
22820 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22821 // CHECK18-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
22822 // CHECK18-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
22823 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
22824 // CHECK18-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
22825 // CHECK18-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
22826 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22827 // CHECK18:       .omp.final.done:
22828 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
22829 // CHECK18:       omp.precond.end:
22830 // CHECK18-NEXT:    ret void
22831 //
22832 //
22833 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..3
22834 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
22835 // CHECK18-NEXT:  entry:
22836 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22837 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22838 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
22839 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
22840 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22841 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22842 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22843 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22844 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22845 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22846 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22847 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
22848 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22849 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22850 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22851 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22852 // CHECK18-NEXT:    [[I5:%.*]] = alloca i32, align 4
22853 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22854 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22855 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
22856 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
22857 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22858 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22859 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
22860 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22861 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22862 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
22863 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
22864 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
22865 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22866 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
22867 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22868 // CHECK18-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22869 // CHECK18-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22870 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
22871 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22872 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
22873 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22874 // CHECK18:       omp.precond.then:
22875 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22876 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22877 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
22878 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
22879 // CHECK18-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
22880 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
22881 // CHECK18-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
22882 // CHECK18-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
22883 // CHECK18-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
22884 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22885 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22886 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22887 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
22888 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22889 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22890 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22891 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
22892 // CHECK18-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22893 // CHECK18:       cond.true:
22894 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22895 // CHECK18-NEXT:    br label [[COND_END:%.*]]
22896 // CHECK18:       cond.false:
22897 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22898 // CHECK18-NEXT:    br label [[COND_END]]
22899 // CHECK18:       cond.end:
22900 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
22901 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22902 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22903 // CHECK18-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
22904 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22905 // CHECK18:       omp.inner.for.cond:
22906 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
22907 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
22908 // CHECK18-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
22909 // CHECK18-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22910 // CHECK18:       omp.inner.for.body:
22911 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
22912 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
22913 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22914 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !25
22915 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !25
22916 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
22917 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
22918 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
22919 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22920 // CHECK18:       omp.body.continue:
22921 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22922 // CHECK18:       omp.inner.for.inc:
22923 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
22924 // CHECK18-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
22925 // CHECK18-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
22926 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
22927 // CHECK18:       omp.inner.for.end:
22928 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22929 // CHECK18:       omp.loop.exit:
22930 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22931 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
22932 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
22933 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22934 // CHECK18-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
22935 // CHECK18-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22936 // CHECK18:       .omp.final.then:
22937 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22938 // CHECK18-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP24]], 0
22939 // CHECK18-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
22940 // CHECK18-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
22941 // CHECK18-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
22942 // CHECK18-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
22943 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22944 // CHECK18:       .omp.final.done:
22945 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
22946 // CHECK18:       omp.precond.end:
22947 // CHECK18-NEXT:    ret void
22948 //
22949 //
22950 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
22951 // CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
22952 // CHECK18-NEXT:  entry:
22953 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22954 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22955 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22956 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
22957 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22958 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
22959 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22960 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22961 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
22962 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
22963 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22964 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22965 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
22966 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
22967 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
22968 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
22969 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
22970 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
22971 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
22972 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
22973 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
22974 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
22975 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
22976 // CHECK18-NEXT:    ret void
22977 //
22978 //
22979 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5
22980 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
22981 // CHECK18-NEXT:  entry:
22982 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22983 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22984 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22985 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22986 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
22987 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
22988 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22989 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22990 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22991 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
22992 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
22993 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22994 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22995 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22996 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22997 // CHECK18-NEXT:    [[I5:%.*]] = alloca i32, align 4
22998 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22999 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
23000 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23001 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23002 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23003 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23004 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23005 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23006 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23007 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23008 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23009 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
23010 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23011 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
23012 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23013 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
23014 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23015 // CHECK18-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
23016 // CHECK18-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
23017 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
23018 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23019 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
23020 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23021 // CHECK18:       omp.precond.then:
23022 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23023 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23024 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
23025 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23026 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23027 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
23028 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23029 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
23030 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
23031 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23032 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23033 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
23034 // CHECK18-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23035 // CHECK18:       cond.true:
23036 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23037 // CHECK18-NEXT:    br label [[COND_END:%.*]]
23038 // CHECK18:       cond.false:
23039 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23040 // CHECK18-NEXT:    br label [[COND_END]]
23041 // CHECK18:       cond.end:
23042 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
23043 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23044 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23045 // CHECK18-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
23046 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23047 // CHECK18:       omp.inner.for.cond:
23048 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
23049 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
23050 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
23051 // CHECK18-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
23052 // CHECK18-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23053 // CHECK18:       omp.inner.for.body:
23054 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
23055 // CHECK18-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
23056 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
23057 // CHECK18-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
23058 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !28
23059 // CHECK18-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
23060 // CHECK18-NEXT:    store i32 [[TMP20]], i32* [[CONV8]], align 4, !llvm.access.group !28
23061 // CHECK18-NEXT:    [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !28
23062 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !28
23063 // CHECK18-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
23064 // CHECK18-NEXT:    store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28
23065 // CHECK18-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28
23066 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28
23067 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23068 // CHECK18:       omp.inner.for.inc:
23069 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
23070 // CHECK18-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
23071 // CHECK18-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
23072 // CHECK18-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
23073 // CHECK18-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
23074 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
23075 // CHECK18-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
23076 // CHECK18-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
23077 // CHECK18-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
23078 // CHECK18-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !28
23079 // CHECK18-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
23080 // CHECK18-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
23081 // CHECK18-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
23082 // CHECK18-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
23083 // CHECK18-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
23084 // CHECK18-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
23085 // CHECK18:       cond.true14:
23086 // CHECK18-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4, !llvm.access.group !28
23087 // CHECK18-NEXT:    br label [[COND_END16:%.*]]
23088 // CHECK18:       cond.false15:
23089 // CHECK18-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
23090 // CHECK18-NEXT:    br label [[COND_END16]]
23091 // CHECK18:       cond.end16:
23092 // CHECK18-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
23093 // CHECK18-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !28
23094 // CHECK18-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !28
23095 // CHECK18-NEXT:    store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
23096 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
23097 // CHECK18:       omp.inner.for.end:
23098 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23099 // CHECK18:       omp.loop.exit:
23100 // CHECK18-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23101 // CHECK18-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
23102 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
23103 // CHECK18-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23104 // CHECK18-NEXT:    [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0
23105 // CHECK18-NEXT:    br i1 [[TMP38]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23106 // CHECK18:       .omp.final.then:
23107 // CHECK18-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23108 // CHECK18-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP39]], 0
23109 // CHECK18-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
23110 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV19]], 1
23111 // CHECK18-NEXT:    [[ADD20:%.*]] = add nsw i32 0, [[MUL]]
23112 // CHECK18-NEXT:    store i32 [[ADD20]], i32* [[I5]], align 4
23113 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23114 // CHECK18:       .omp.final.done:
23115 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
23116 // CHECK18:       omp.precond.end:
23117 // CHECK18-NEXT:    ret void
23118 //
23119 //
23120 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6
23121 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
23122 // CHECK18-NEXT:  entry:
23123 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23124 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23125 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
23126 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
23127 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23128 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23129 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
23130 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
23131 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23132 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23133 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
23134 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
23135 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
23136 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23137 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23138 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23139 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23140 // CHECK18-NEXT:    [[I7:%.*]] = alloca i32, align 4
23141 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23142 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23143 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
23144 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
23145 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23146 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23147 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23148 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23149 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23150 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23151 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23152 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
23153 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23154 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
23155 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23156 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
23157 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23158 // CHECK18-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
23159 // CHECK18-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
23160 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
23161 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23162 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
23163 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23164 // CHECK18:       omp.precond.then:
23165 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23166 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23167 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
23168 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
23169 // CHECK18-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
23170 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
23171 // CHECK18-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
23172 // CHECK18-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
23173 // CHECK18-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
23174 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23175 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23176 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23177 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
23178 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23179 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23180 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23181 // CHECK18-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
23182 // CHECK18-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23183 // CHECK18:       cond.true:
23184 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23185 // CHECK18-NEXT:    br label [[COND_END:%.*]]
23186 // CHECK18:       cond.false:
23187 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23188 // CHECK18-NEXT:    br label [[COND_END]]
23189 // CHECK18:       cond.end:
23190 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
23191 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23192 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23193 // CHECK18-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
23194 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23195 // CHECK18:       omp.inner.for.cond:
23196 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
23197 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
23198 // CHECK18-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
23199 // CHECK18-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23200 // CHECK18:       omp.inner.for.body:
23201 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
23202 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
23203 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23204 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !31
23205 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !31
23206 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
23207 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
23208 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !31
23209 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23210 // CHECK18:       omp.body.continue:
23211 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23212 // CHECK18:       omp.inner.for.inc:
23213 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
23214 // CHECK18-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
23215 // CHECK18-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
23216 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
23217 // CHECK18:       omp.inner.for.end:
23218 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23219 // CHECK18:       omp.loop.exit:
23220 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23221 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
23222 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
23223 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23224 // CHECK18-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
23225 // CHECK18-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23226 // CHECK18:       .omp.final.then:
23227 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23228 // CHECK18-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP24]], 0
23229 // CHECK18-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
23230 // CHECK18-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
23231 // CHECK18-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
23232 // CHECK18-NEXT:    store i32 [[ADD14]], i32* [[I7]], align 4
23233 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23234 // CHECK18:       .omp.final.done:
23235 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
23236 // CHECK18:       omp.precond.end:
23237 // CHECK18-NEXT:    ret void
23238 //
23239 //
23240 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
23241 // CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
23242 // CHECK18-NEXT:  entry:
23243 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23244 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23245 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
23246 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
23247 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23248 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23249 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23250 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23251 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23252 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23253 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23254 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
23255 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
23256 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
23257 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
23258 // CHECK18-NEXT:    ret void
23259 //
23260 //
23261 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..8
23262 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
23263 // CHECK18-NEXT:  entry:
23264 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23265 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23266 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23267 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23268 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
23269 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23270 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23271 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23272 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23273 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
23274 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23275 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23276 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23277 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23278 // CHECK18-NEXT:    [[I3:%.*]] = alloca i32, align 4
23279 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
23280 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23281 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23282 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23283 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23284 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23285 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23286 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23287 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23288 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23289 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
23290 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23291 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
23292 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23293 // CHECK18-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23294 // CHECK18-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23295 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
23296 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23297 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
23298 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23299 // CHECK18:       omp.precond.then:
23300 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23301 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23302 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
23303 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23304 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23305 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23306 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
23307 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23308 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23309 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23310 // CHECK18-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
23311 // CHECK18-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23312 // CHECK18:       cond.true:
23313 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23314 // CHECK18-NEXT:    br label [[COND_END:%.*]]
23315 // CHECK18:       cond.false:
23316 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23317 // CHECK18-NEXT:    br label [[COND_END]]
23318 // CHECK18:       cond.end:
23319 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
23320 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23321 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23322 // CHECK18-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
23323 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23324 // CHECK18:       omp.inner.for.cond:
23325 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
23326 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
23327 // CHECK18-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
23328 // CHECK18-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23329 // CHECK18:       omp.inner.for.body:
23330 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !34
23331 // CHECK18-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
23332 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34
23333 // CHECK18-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
23334 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !34
23335 // CHECK18-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
23336 // CHECK18-NEXT:    store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34
23337 // CHECK18-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34
23338 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34
23339 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23340 // CHECK18:       omp.inner.for.inc:
23341 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
23342 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !34
23343 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
23344 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
23345 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
23346 // CHECK18:       omp.inner.for.end:
23347 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23348 // CHECK18:       omp.loop.exit:
23349 // CHECK18-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23350 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
23351 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
23352 // CHECK18-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23353 // CHECK18-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
23354 // CHECK18-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23355 // CHECK18:       .omp.final.then:
23356 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23357 // CHECK18-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
23358 // CHECK18-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
23359 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
23360 // CHECK18-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
23361 // CHECK18-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
23362 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23363 // CHECK18:       .omp.final.done:
23364 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
23365 // CHECK18:       omp.precond.end:
23366 // CHECK18-NEXT:    ret void
23367 //
23368 //
23369 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9
23370 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
23371 // CHECK18-NEXT:  entry:
23372 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23373 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23374 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
23375 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
23376 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23377 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23378 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
23379 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23380 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23381 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23382 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23383 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
23384 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23385 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23386 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23387 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23388 // CHECK18-NEXT:    [[I5:%.*]] = alloca i32, align 4
23389 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23390 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23391 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
23392 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
23393 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23394 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23395 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23396 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23397 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23398 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23399 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23400 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
23401 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23402 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
23403 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23404 // CHECK18-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23405 // CHECK18-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23406 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
23407 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23408 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
23409 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23410 // CHECK18:       omp.precond.then:
23411 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23412 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23413 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
23414 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
23415 // CHECK18-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
23416 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
23417 // CHECK18-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
23418 // CHECK18-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
23419 // CHECK18-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
23420 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23421 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23422 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23423 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23424 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23425 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23426 // CHECK18-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
23427 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
23428 // CHECK18:       omp.dispatch.cond:
23429 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23430 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
23431 // CHECK18-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
23432 // CHECK18-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
23433 // CHECK18-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
23434 // CHECK18:       omp.dispatch.body:
23435 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23436 // CHECK18-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
23437 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23438 // CHECK18:       omp.inner.for.cond:
23439 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
23440 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !37
23441 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
23442 // CHECK18-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23443 // CHECK18:       omp.inner.for.body:
23444 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
23445 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
23446 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23447 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !37
23448 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !37
23449 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
23450 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
23451 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !37
23452 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23453 // CHECK18:       omp.body.continue:
23454 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23455 // CHECK18:       omp.inner.for.inc:
23456 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
23457 // CHECK18-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
23458 // CHECK18-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
23459 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
23460 // CHECK18:       omp.inner.for.end:
23461 // CHECK18-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
23462 // CHECK18:       omp.dispatch.inc:
23463 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND]]
23464 // CHECK18:       omp.dispatch.end:
23465 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23466 // CHECK18-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
23467 // CHECK18-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23468 // CHECK18:       .omp.final.then:
23469 // CHECK18-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23470 // CHECK18-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP23]], 0
23471 // CHECK18-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
23472 // CHECK18-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
23473 // CHECK18-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
23474 // CHECK18-NEXT:    store i32 [[ADD11]], i32* [[I5]], align 4
23475 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23476 // CHECK18:       .omp.final.done:
23477 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
23478 // CHECK18:       omp.precond.end:
23479 // CHECK18-NEXT:    ret void
23480 //
23481 //
23482 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
23483 // CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
23484 // CHECK18-NEXT:  entry:
23485 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23486 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23487 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
23488 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
23489 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
23490 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
23491 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23492 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23493 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23494 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23495 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23496 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23497 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23498 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
23499 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23500 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
23501 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
23502 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
23503 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
23504 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
23505 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
23506 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
23507 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
23508 // CHECK18-NEXT:    ret void
23509 //
23510 //
23511 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11
23512 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
23513 // CHECK18-NEXT:  entry:
23514 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23515 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23516 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23517 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23518 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
23519 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
23520 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23521 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23522 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
23523 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
23524 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
23525 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23526 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23527 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23528 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23529 // CHECK18-NEXT:    [[I5:%.*]] = alloca i32, align 4
23530 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
23531 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
23532 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23533 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23534 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23535 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23536 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23537 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23538 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23539 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23540 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23541 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
23542 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23543 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
23544 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23545 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
23546 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23547 // CHECK18-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
23548 // CHECK18-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
23549 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
23550 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23551 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
23552 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23553 // CHECK18:       omp.precond.then:
23554 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23555 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23556 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
23557 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23558 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23559 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23560 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
23561 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23562 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23563 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23564 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
23565 // CHECK18-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23566 // CHECK18:       cond.true:
23567 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23568 // CHECK18-NEXT:    br label [[COND_END:%.*]]
23569 // CHECK18:       cond.false:
23570 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23571 // CHECK18-NEXT:    br label [[COND_END]]
23572 // CHECK18:       cond.end:
23573 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
23574 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23575 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23576 // CHECK18-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
23577 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23578 // CHECK18:       omp.inner.for.cond:
23579 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
23580 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
23581 // CHECK18-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
23582 // CHECK18-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23583 // CHECK18:       omp.inner.for.body:
23584 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !40
23585 // CHECK18-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
23586 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !40
23587 // CHECK18-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
23588 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !40
23589 // CHECK18-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
23590 // CHECK18-NEXT:    store i32 [[TMP19]], i32* [[CONV8]], align 4, !llvm.access.group !40
23591 // CHECK18-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !40
23592 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !40
23593 // CHECK18-NEXT:    [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
23594 // CHECK18-NEXT:    store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40
23595 // CHECK18-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40
23596 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40
23597 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23598 // CHECK18:       omp.inner.for.inc:
23599 // CHECK18-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
23600 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !40
23601 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
23602 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
23603 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
23604 // CHECK18:       omp.inner.for.end:
23605 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23606 // CHECK18:       omp.loop.exit:
23607 // CHECK18-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23608 // CHECK18-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
23609 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
23610 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23611 // CHECK18-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
23612 // CHECK18-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23613 // CHECK18:       .omp.final.then:
23614 // CHECK18-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23615 // CHECK18-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP29]], 0
23616 // CHECK18-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23617 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV11]], 1
23618 // CHECK18-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL]]
23619 // CHECK18-NEXT:    store i32 [[ADD12]], i32* [[I5]], align 4
23620 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23621 // CHECK18:       .omp.final.done:
23622 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
23623 // CHECK18:       omp.precond.end:
23624 // CHECK18-NEXT:    ret void
23625 //
23626 //
23627 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12
23628 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
23629 // CHECK18-NEXT:  entry:
23630 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23631 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23632 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
23633 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
23634 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23635 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23636 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
23637 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
23638 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23639 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23640 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
23641 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
23642 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
23643 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23644 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23645 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23646 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23647 // CHECK18-NEXT:    [[I7:%.*]] = alloca i32, align 4
23648 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23649 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23650 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
23651 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
23652 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23653 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23654 // CHECK18-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
23655 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23656 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23657 // CHECK18-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23658 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
23659 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
23660 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
23661 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
23662 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23663 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
23664 // CHECK18-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23665 // CHECK18-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
23666 // CHECK18-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
23667 // CHECK18-NEXT:    store i32 0, i32* [[I]], align 4
23668 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23669 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
23670 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23671 // CHECK18:       omp.precond.then:
23672 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23673 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
23674 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
23675 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
23676 // CHECK18-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
23677 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
23678 // CHECK18-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
23679 // CHECK18-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
23680 // CHECK18-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
23681 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23682 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23683 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
23684 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23685 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23686 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23687 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
23688 // CHECK18-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
23689 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
23690 // CHECK18:       omp.dispatch.cond:
23691 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23692 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
23693 // CHECK18-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
23694 // CHECK18-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
23695 // CHECK18-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
23696 // CHECK18:       omp.dispatch.body:
23697 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23698 // CHECK18-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23699 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23700 // CHECK18:       omp.inner.for.cond:
23701 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
23702 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43
23703 // CHECK18-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23704 // CHECK18-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23705 // CHECK18:       omp.inner.for.body:
23706 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
23707 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
23708 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23709 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !43
23710 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !43
23711 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
23712 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
23713 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !43
23714 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23715 // CHECK18:       omp.body.continue:
23716 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23717 // CHECK18:       omp.inner.for.inc:
23718 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
23719 // CHECK18-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
23720 // CHECK18-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
23721 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
23722 // CHECK18:       omp.inner.for.end:
23723 // CHECK18-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
23724 // CHECK18:       omp.dispatch.inc:
23725 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND]]
23726 // CHECK18:       omp.dispatch.end:
23727 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23728 // CHECK18-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
23729 // CHECK18-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23730 // CHECK18:       .omp.final.then:
23731 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23732 // CHECK18-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP24]], 0
23733 // CHECK18-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23734 // CHECK18-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
23735 // CHECK18-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
23736 // CHECK18-NEXT:    store i32 [[ADD13]], i32* [[I7]], align 4
23737 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23738 // CHECK18:       .omp.final.done:
23739 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
23740 // CHECK18:       omp.precond.end:
23741 // CHECK18-NEXT:    ret void
23742 //
23743 //
23744 // CHECK18-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
23745 // CHECK18-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
23746 // CHECK18-NEXT:  entry:
23747 // CHECK18-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
23748 // CHECK18-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
23749 // CHECK18-NEXT:    [[M:%.*]] = alloca i32, align 4
23750 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
23751 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
23752 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
23753 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23754 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
23755 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
23756 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
23757 // CHECK18-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
23758 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23759 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
23760 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
23761 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
23762 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
23763 // CHECK18-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
23764 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
23765 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
23766 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
23767 // CHECK18-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
23768 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
23769 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
23770 // CHECK18-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
23771 // CHECK18-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
23772 // CHECK18-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
23773 // CHECK18-NEXT:    [[_TMP25:%.*]] = alloca i32, align 4
23774 // CHECK18-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
23775 // CHECK18-NEXT:    store i32 10, i32* [[M]], align 4
23776 // CHECK18-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
23777 // CHECK18-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
23778 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
23779 // CHECK18-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
23780 // CHECK18-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
23781 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
23782 // CHECK18-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
23783 // CHECK18-NEXT:    store i8* null, i8** [[TMP4]], align 8
23784 // CHECK18-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
23785 // CHECK18-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
23786 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
23787 // CHECK18-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
23788 // CHECK18-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
23789 // CHECK18-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
23790 // CHECK18:       omp_offload.failed:
23791 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
23792 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT]]
23793 // CHECK18:       omp_offload.cont:
23794 // CHECK18-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
23795 // CHECK18-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
23796 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
23797 // CHECK18-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
23798 // CHECK18-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
23799 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
23800 // CHECK18-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
23801 // CHECK18-NEXT:    store i8* null, i8** [[TMP13]], align 8
23802 // CHECK18-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
23803 // CHECK18-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
23804 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
23805 // CHECK18-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
23806 // CHECK18-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
23807 // CHECK18-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
23808 // CHECK18:       omp_offload.failed5:
23809 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
23810 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
23811 // CHECK18:       omp_offload.cont6:
23812 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
23813 // CHECK18-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
23814 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23815 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
23816 // CHECK18-NEXT:    store i32 [[TMP19]], i32* [[CONV]], align 4
23817 // CHECK18-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
23818 // CHECK18-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
23819 // CHECK18-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
23820 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
23821 // CHECK18-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
23822 // CHECK18-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
23823 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
23824 // CHECK18-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
23825 // CHECK18-NEXT:    store i8* null, i8** [[TMP25]], align 8
23826 // CHECK18-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
23827 // CHECK18-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
23828 // CHECK18-NEXT:    store i64 [[TMP20]], i64* [[TMP27]], align 8
23829 // CHECK18-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
23830 // CHECK18-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
23831 // CHECK18-NEXT:    store i64 [[TMP20]], i64* [[TMP29]], align 8
23832 // CHECK18-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
23833 // CHECK18-NEXT:    store i8* null, i8** [[TMP30]], align 8
23834 // CHECK18-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
23835 // CHECK18-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
23836 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
23837 // CHECK18-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
23838 // CHECK18-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
23839 // CHECK18-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
23840 // CHECK18:       omp_offload.failed11:
23841 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
23842 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
23843 // CHECK18:       omp_offload.cont12:
23844 // CHECK18-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
23845 // CHECK18-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
23846 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
23847 // CHECK18-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
23848 // CHECK18-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
23849 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
23850 // CHECK18-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
23851 // CHECK18-NEXT:    store i8* null, i8** [[TMP39]], align 8
23852 // CHECK18-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
23853 // CHECK18-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
23854 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
23855 // CHECK18-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
23856 // CHECK18-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
23857 // CHECK18-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
23858 // CHECK18:       omp_offload.failed17:
23859 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
23860 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
23861 // CHECK18:       omp_offload.cont18:
23862 // CHECK18-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
23863 // CHECK18-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
23864 // CHECK18-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
23865 // CHECK18-NEXT:    [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
23866 // CHECK18-NEXT:    store i32 [[TMP45]], i32* [[CONV21]], align 4
23867 // CHECK18-NEXT:    [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
23868 // CHECK18-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
23869 // CHECK18-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
23870 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
23871 // CHECK18-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
23872 // CHECK18-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
23873 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
23874 // CHECK18-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
23875 // CHECK18-NEXT:    store i8* null, i8** [[TMP51]], align 8
23876 // CHECK18-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
23877 // CHECK18-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
23878 // CHECK18-NEXT:    store i64 [[TMP46]], i64* [[TMP53]], align 8
23879 // CHECK18-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
23880 // CHECK18-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
23881 // CHECK18-NEXT:    store i64 [[TMP46]], i64* [[TMP55]], align 8
23882 // CHECK18-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
23883 // CHECK18-NEXT:    store i8* null, i8** [[TMP56]], align 8
23884 // CHECK18-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
23885 // CHECK18-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
23886 // CHECK18-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
23887 // CHECK18-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
23888 // CHECK18-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
23889 // CHECK18-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
23890 // CHECK18:       omp_offload.failed26:
23891 // CHECK18-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
23892 // CHECK18-NEXT:    br label [[OMP_OFFLOAD_CONT27]]
23893 // CHECK18:       omp_offload.cont27:
23894 // CHECK18-NEXT:    ret i32 0
23895 //
23896 //
23897 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
23898 // CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
23899 // CHECK18-NEXT:  entry:
23900 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
23901 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
23902 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
23903 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
23904 // CHECK18-NEXT:    ret void
23905 //
23906 //
23907 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14
23908 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
23909 // CHECK18-NEXT:  entry:
23910 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23911 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23912 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
23913 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23914 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23915 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23916 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23917 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23918 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23919 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
23920 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23921 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23922 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
23923 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
23924 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23925 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
23926 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23927 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23928 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23929 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
23930 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23931 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23932 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
23933 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23934 // CHECK18:       cond.true:
23935 // CHECK18-NEXT:    br label [[COND_END:%.*]]
23936 // CHECK18:       cond.false:
23937 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23938 // CHECK18-NEXT:    br label [[COND_END]]
23939 // CHECK18:       cond.end:
23940 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
23941 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23942 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23943 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
23944 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23945 // CHECK18:       omp.inner.for.cond:
23946 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
23947 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
23948 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
23949 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23950 // CHECK18:       omp.inner.for.body:
23951 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !46
23952 // CHECK18-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
23953 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
23954 // CHECK18-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
23955 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46
23956 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23957 // CHECK18:       omp.inner.for.inc:
23958 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
23959 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !46
23960 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
23961 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
23962 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
23963 // CHECK18:       omp.inner.for.end:
23964 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23965 // CHECK18:       omp.loop.exit:
23966 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
23967 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23968 // CHECK18-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
23969 // CHECK18-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23970 // CHECK18:       .omp.final.then:
23971 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
23972 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23973 // CHECK18:       .omp.final.done:
23974 // CHECK18-NEXT:    ret void
23975 //
23976 //
23977 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15
23978 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
23979 // CHECK18-NEXT:  entry:
23980 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23981 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23982 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
23983 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
23984 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
23985 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23986 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23987 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23988 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23989 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23990 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23991 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
23992 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23993 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23994 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
23995 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
23996 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
23997 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
23998 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23999 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24000 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24001 // CHECK18-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
24002 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24003 // CHECK18-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
24004 // CHECK18-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
24005 // CHECK18-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
24006 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24007 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24008 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24009 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
24010 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24011 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24012 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
24013 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24014 // CHECK18:       cond.true:
24015 // CHECK18-NEXT:    br label [[COND_END:%.*]]
24016 // CHECK18:       cond.false:
24017 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24018 // CHECK18-NEXT:    br label [[COND_END]]
24019 // CHECK18:       cond.end:
24020 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
24021 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24022 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24023 // CHECK18-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
24024 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24025 // CHECK18:       omp.inner.for.cond:
24026 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
24027 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !49
24028 // CHECK18-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
24029 // CHECK18-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24030 // CHECK18:       omp.inner.for.body:
24031 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
24032 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
24033 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24034 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !49
24035 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !49
24036 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
24037 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
24038 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !49
24039 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24040 // CHECK18:       omp.body.continue:
24041 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24042 // CHECK18:       omp.inner.for.inc:
24043 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
24044 // CHECK18-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
24045 // CHECK18-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
24046 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
24047 // CHECK18:       omp.inner.for.end:
24048 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24049 // CHECK18:       omp.loop.exit:
24050 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
24051 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24052 // CHECK18-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
24053 // CHECK18-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24054 // CHECK18:       .omp.final.then:
24055 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24056 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24057 // CHECK18:       .omp.final.done:
24058 // CHECK18-NEXT:    ret void
24059 //
24060 //
24061 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
24062 // CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
24063 // CHECK18-NEXT:  entry:
24064 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24065 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24066 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24067 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
24068 // CHECK18-NEXT:    ret void
24069 //
24070 //
24071 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..17
24072 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
24073 // CHECK18-NEXT:  entry:
24074 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24075 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24076 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24077 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24078 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24079 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24080 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24081 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24082 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24083 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24084 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24085 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24086 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24087 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24088 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24089 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
24090 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24091 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24092 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24093 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
24094 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24095 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24096 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
24097 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24098 // CHECK18:       cond.true:
24099 // CHECK18-NEXT:    br label [[COND_END:%.*]]
24100 // CHECK18:       cond.false:
24101 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24102 // CHECK18-NEXT:    br label [[COND_END]]
24103 // CHECK18:       cond.end:
24104 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
24105 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24106 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24107 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
24108 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24109 // CHECK18:       omp.inner.for.cond:
24110 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
24111 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
24112 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
24113 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24114 // CHECK18:       omp.inner.for.body:
24115 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !52
24116 // CHECK18-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
24117 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
24118 // CHECK18-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
24119 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52
24120 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24121 // CHECK18:       omp.inner.for.inc:
24122 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
24123 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !52
24124 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
24125 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
24126 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
24127 // CHECK18:       omp.inner.for.end:
24128 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24129 // CHECK18:       omp.loop.exit:
24130 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
24131 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24132 // CHECK18-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
24133 // CHECK18-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24134 // CHECK18:       .omp.final.then:
24135 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24136 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24137 // CHECK18:       .omp.final.done:
24138 // CHECK18-NEXT:    ret void
24139 //
24140 //
24141 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18
24142 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
24143 // CHECK18-NEXT:  entry:
24144 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24145 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24146 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
24147 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
24148 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24149 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24150 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24151 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24152 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24153 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24154 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24155 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24156 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24157 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24158 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24159 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24160 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24161 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24162 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24163 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24164 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24165 // CHECK18-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
24166 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24167 // CHECK18-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
24168 // CHECK18-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
24169 // CHECK18-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
24170 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24171 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24172 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24173 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
24174 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24175 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24176 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
24177 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24178 // CHECK18:       cond.true:
24179 // CHECK18-NEXT:    br label [[COND_END:%.*]]
24180 // CHECK18:       cond.false:
24181 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24182 // CHECK18-NEXT:    br label [[COND_END]]
24183 // CHECK18:       cond.end:
24184 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
24185 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24186 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24187 // CHECK18-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
24188 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24189 // CHECK18:       omp.inner.for.cond:
24190 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
24191 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !55
24192 // CHECK18-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
24193 // CHECK18-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24194 // CHECK18:       omp.inner.for.body:
24195 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
24196 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
24197 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24198 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !55
24199 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !55
24200 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
24201 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
24202 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !55
24203 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24204 // CHECK18:       omp.body.continue:
24205 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24206 // CHECK18:       omp.inner.for.inc:
24207 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
24208 // CHECK18-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
24209 // CHECK18-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55
24210 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
24211 // CHECK18:       omp.inner.for.end:
24212 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24213 // CHECK18:       omp.loop.exit:
24214 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
24215 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24216 // CHECK18-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
24217 // CHECK18-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24218 // CHECK18:       .omp.final.then:
24219 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24220 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24221 // CHECK18:       .omp.final.done:
24222 // CHECK18-NEXT:    ret void
24223 //
24224 //
24225 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
24226 // CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
24227 // CHECK18-NEXT:  entry:
24228 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24229 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
24230 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
24231 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24232 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
24233 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24234 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
24235 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
24236 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
24237 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
24238 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
24239 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
24240 // CHECK18-NEXT:    ret void
24241 //
24242 //
24243 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..21
24244 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
24245 // CHECK18-NEXT:  entry:
24246 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24247 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24248 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24249 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
24250 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24251 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24252 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24253 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24254 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24255 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24256 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24257 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
24258 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24259 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24260 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24261 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
24262 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24263 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
24264 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24265 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
24266 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24267 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24268 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24269 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
24270 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24271 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24272 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
24273 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24274 // CHECK18:       cond.true:
24275 // CHECK18-NEXT:    br label [[COND_END:%.*]]
24276 // CHECK18:       cond.false:
24277 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24278 // CHECK18-NEXT:    br label [[COND_END]]
24279 // CHECK18:       cond.end:
24280 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
24281 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24282 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24283 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
24284 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24285 // CHECK18:       omp.inner.for.cond:
24286 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
24287 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
24288 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
24289 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24290 // CHECK18:       omp.inner.for.body:
24291 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !58
24292 // CHECK18-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
24293 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !58
24294 // CHECK18-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
24295 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !58
24296 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
24297 // CHECK18-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58
24298 // CHECK18-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58
24299 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58
24300 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24301 // CHECK18:       omp.inner.for.inc:
24302 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
24303 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !58
24304 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
24305 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
24306 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
24307 // CHECK18:       omp.inner.for.end:
24308 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24309 // CHECK18:       omp.loop.exit:
24310 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
24311 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24312 // CHECK18-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
24313 // CHECK18-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24314 // CHECK18:       .omp.final.then:
24315 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24316 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24317 // CHECK18:       .omp.final.done:
24318 // CHECK18-NEXT:    ret void
24319 //
24320 //
24321 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22
24322 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
24323 // CHECK18-NEXT:  entry:
24324 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24325 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24326 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
24327 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
24328 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24329 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
24330 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24331 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24332 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24333 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24334 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24335 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24336 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24337 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24338 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24339 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24340 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24341 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24342 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
24343 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24344 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
24345 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24346 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24347 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24348 // CHECK18-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
24349 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24350 // CHECK18-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
24351 // CHECK18-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
24352 // CHECK18-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
24353 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24354 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24355 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
24356 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24357 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
24358 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
24359 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24360 // CHECK18:       omp.dispatch.cond:
24361 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24362 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24363 // CHECK18-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
24364 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
24365 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24366 // CHECK18:       cond.true:
24367 // CHECK18-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24368 // CHECK18-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
24369 // CHECK18-NEXT:    br label [[COND_END:%.*]]
24370 // CHECK18:       cond.false:
24371 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24372 // CHECK18-NEXT:    br label [[COND_END]]
24373 // CHECK18:       cond.end:
24374 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
24375 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24376 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24377 // CHECK18-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
24378 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24379 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24380 // CHECK18-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
24381 // CHECK18-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24382 // CHECK18:       omp.dispatch.body:
24383 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24384 // CHECK18:       omp.inner.for.cond:
24385 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
24386 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !61
24387 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
24388 // CHECK18-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24389 // CHECK18:       omp.inner.for.body:
24390 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
24391 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
24392 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24393 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !61
24394 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !61
24395 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
24396 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
24397 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !61
24398 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24399 // CHECK18:       omp.body.continue:
24400 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24401 // CHECK18:       omp.inner.for.inc:
24402 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
24403 // CHECK18-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
24404 // CHECK18-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !61
24405 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
24406 // CHECK18:       omp.inner.for.end:
24407 // CHECK18-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24408 // CHECK18:       omp.dispatch.inc:
24409 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24410 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24411 // CHECK18-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
24412 // CHECK18-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
24413 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24414 // CHECK18-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24415 // CHECK18-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
24416 // CHECK18-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
24417 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND]]
24418 // CHECK18:       omp.dispatch.end:
24419 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
24420 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24421 // CHECK18-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
24422 // CHECK18-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24423 // CHECK18:       .omp.final.then:
24424 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24425 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24426 // CHECK18:       .omp.final.done:
24427 // CHECK18-NEXT:    ret void
24428 //
24429 //
24430 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
24431 // CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
24432 // CHECK18-NEXT:  entry:
24433 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24434 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24435 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24436 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
24437 // CHECK18-NEXT:    ret void
24438 //
24439 //
24440 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..25
24441 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
24442 // CHECK18-NEXT:  entry:
24443 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24444 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24445 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24446 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24447 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24448 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24449 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24450 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24451 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24452 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24453 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24454 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24455 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24456 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24457 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24458 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
24459 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24460 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24461 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24462 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
24463 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24464 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24465 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
24466 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24467 // CHECK18:       cond.true:
24468 // CHECK18-NEXT:    br label [[COND_END:%.*]]
24469 // CHECK18:       cond.false:
24470 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24471 // CHECK18-NEXT:    br label [[COND_END]]
24472 // CHECK18:       cond.end:
24473 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
24474 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24475 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24476 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
24477 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24478 // CHECK18:       omp.inner.for.cond:
24479 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
24480 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
24481 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
24482 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24483 // CHECK18:       omp.inner.for.body:
24484 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !64
24485 // CHECK18-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
24486 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64
24487 // CHECK18-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
24488 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64
24489 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24490 // CHECK18:       omp.inner.for.inc:
24491 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
24492 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !64
24493 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
24494 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64
24495 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
24496 // CHECK18:       omp.inner.for.end:
24497 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24498 // CHECK18:       omp.loop.exit:
24499 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
24500 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24501 // CHECK18-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
24502 // CHECK18-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24503 // CHECK18:       .omp.final.then:
24504 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24505 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24506 // CHECK18:       .omp.final.done:
24507 // CHECK18-NEXT:    ret void
24508 //
24509 //
24510 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26
24511 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
24512 // CHECK18-NEXT:  entry:
24513 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24514 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24515 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
24516 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
24517 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24518 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24519 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24520 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24521 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24522 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24523 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24524 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24525 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24526 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24527 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24528 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24529 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24530 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24531 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24532 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24533 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24534 // CHECK18-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
24535 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24536 // CHECK18-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
24537 // CHECK18-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
24538 // CHECK18-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
24539 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24540 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24541 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24542 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24543 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24544 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
24545 // CHECK18-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
24546 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24547 // CHECK18:       omp.dispatch.cond:
24548 // CHECK18-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
24549 // CHECK18-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
24550 // CHECK18-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24551 // CHECK18:       omp.dispatch.body:
24552 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24553 // CHECK18-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
24554 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24555 // CHECK18:       omp.inner.for.cond:
24556 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
24557 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !67
24558 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
24559 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24560 // CHECK18:       omp.inner.for.body:
24561 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
24562 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
24563 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24564 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !67
24565 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !67
24566 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
24567 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
24568 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !67
24569 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24570 // CHECK18:       omp.body.continue:
24571 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24572 // CHECK18:       omp.inner.for.inc:
24573 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
24574 // CHECK18-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
24575 // CHECK18-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !67
24576 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
24577 // CHECK18:       omp.inner.for.end:
24578 // CHECK18-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24579 // CHECK18:       omp.dispatch.inc:
24580 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND]]
24581 // CHECK18:       omp.dispatch.end:
24582 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24583 // CHECK18-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
24584 // CHECK18-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24585 // CHECK18:       .omp.final.then:
24586 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24587 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24588 // CHECK18:       .omp.final.done:
24589 // CHECK18-NEXT:    ret void
24590 //
24591 //
24592 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
24593 // CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
24594 // CHECK18-NEXT:  entry:
24595 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24596 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
24597 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
24598 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24599 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
24600 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24601 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
24602 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
24603 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
24604 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[CONV1]], align 4
24605 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
24606 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
24607 // CHECK18-NEXT:    ret void
24608 //
24609 //
24610 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..29
24611 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
24612 // CHECK18-NEXT:  entry:
24613 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24614 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24615 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24616 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
24617 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24618 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24619 // CHECK18-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24620 // CHECK18-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24621 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24622 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24623 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24624 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
24625 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24626 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24627 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24628 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
24629 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24630 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
24631 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24632 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
24633 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24634 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24635 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24636 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
24637 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24638 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24639 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
24640 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24641 // CHECK18:       cond.true:
24642 // CHECK18-NEXT:    br label [[COND_END:%.*]]
24643 // CHECK18:       cond.false:
24644 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24645 // CHECK18-NEXT:    br label [[COND_END]]
24646 // CHECK18:       cond.end:
24647 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
24648 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24649 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24650 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
24651 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24652 // CHECK18:       omp.inner.for.cond:
24653 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
24654 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
24655 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
24656 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24657 // CHECK18:       omp.inner.for.body:
24658 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !70
24659 // CHECK18-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
24660 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !70
24661 // CHECK18-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
24662 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !70
24663 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
24664 // CHECK18-NEXT:    store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70
24665 // CHECK18-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70
24666 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70
24667 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24668 // CHECK18:       omp.inner.for.inc:
24669 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
24670 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !70
24671 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
24672 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70
24673 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
24674 // CHECK18:       omp.inner.for.end:
24675 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24676 // CHECK18:       omp.loop.exit:
24677 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
24678 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24679 // CHECK18-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
24680 // CHECK18-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24681 // CHECK18:       .omp.final.then:
24682 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24683 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24684 // CHECK18:       .omp.final.done:
24685 // CHECK18-NEXT:    ret void
24686 //
24687 //
24688 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30
24689 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
24690 // CHECK18-NEXT:  entry:
24691 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
24692 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
24693 // CHECK18-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
24694 // CHECK18-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
24695 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
24696 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
24697 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24698 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24699 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24700 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24701 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24702 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24703 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
24704 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
24705 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
24706 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24707 // CHECK18-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24708 // CHECK18-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
24709 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
24710 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
24711 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
24712 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24713 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24714 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
24715 // CHECK18-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
24716 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
24717 // CHECK18-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
24718 // CHECK18-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
24719 // CHECK18-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
24720 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24721 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24722 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
24723 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24724 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24725 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
24726 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
24727 // CHECK18-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
24728 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24729 // CHECK18:       omp.dispatch.cond:
24730 // CHECK18-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
24731 // CHECK18-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
24732 // CHECK18-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24733 // CHECK18:       omp.dispatch.body:
24734 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24735 // CHECK18-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
24736 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24737 // CHECK18:       omp.inner.for.cond:
24738 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
24739 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !73
24740 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
24741 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24742 // CHECK18:       omp.inner.for.body:
24743 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
24744 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
24745 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24746 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !73
24747 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !73
24748 // CHECK18-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
24749 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
24750 // CHECK18-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !73
24751 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24752 // CHECK18:       omp.body.continue:
24753 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24754 // CHECK18:       omp.inner.for.inc:
24755 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
24756 // CHECK18-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
24757 // CHECK18-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !73
24758 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
24759 // CHECK18:       omp.inner.for.end:
24760 // CHECK18-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24761 // CHECK18:       omp.dispatch.inc:
24762 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND]]
24763 // CHECK18:       omp.dispatch.end:
24764 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24765 // CHECK18-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
24766 // CHECK18-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24767 // CHECK18:       .omp.final.then:
24768 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
24769 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24770 // CHECK18:       .omp.final.done:
24771 // CHECK18-NEXT:    ret void
24772 //
24773 //
24774 // CHECK18-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
24775 // CHECK18-SAME: () #[[ATTR5:[0-9]+]] {
24776 // CHECK18-NEXT:  entry:
24777 // CHECK18-NEXT:    call void @__tgt_register_requires(i64 1)
24778 // CHECK18-NEXT:    ret void
24779 //
24780 //
24781 // CHECK19-LABEL: define {{[^@]+}}@main
24782 // CHECK19-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
24783 // CHECK19-NEXT:  entry:
24784 // CHECK19-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
24785 // CHECK19-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
24786 // CHECK19-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 4
24787 // CHECK19-NEXT:    [[N:%.*]] = alloca i32, align 4
24788 // CHECK19-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
24789 // CHECK19-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
24790 // CHECK19-NEXT:    [[M:%.*]] = alloca i32, align 4
24791 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
24792 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
24793 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
24794 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
24795 // CHECK19-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
24796 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24797 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24798 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24799 // CHECK19-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
24800 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
24801 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
24802 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
24803 // CHECK19-NEXT:    [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
24804 // CHECK19-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
24805 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
24806 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
24807 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
24808 // CHECK19-NEXT:    [[N_CASTED18:%.*]] = alloca i32, align 4
24809 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
24810 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
24811 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
24812 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
24813 // CHECK19-NEXT:    [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
24814 // CHECK19-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
24815 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
24816 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
24817 // CHECK19-NEXT:    [[N_CASTED32:%.*]] = alloca i32, align 4
24818 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
24819 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
24820 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
24821 // CHECK19-NEXT:    [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
24822 // CHECK19-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
24823 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
24824 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
24825 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
24826 // CHECK19-NEXT:    [[N_CASTED47:%.*]] = alloca i32, align 4
24827 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
24828 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
24829 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
24830 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
24831 // CHECK19-NEXT:    [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
24832 // CHECK19-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
24833 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
24834 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
24835 // CHECK19-NEXT:    store i32 0, i32* [[RETVAL]], align 4
24836 // CHECK19-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
24837 // CHECK19-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
24838 // CHECK19-NEXT:    store i32 100, i32* [[N]], align 4
24839 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
24840 // CHECK19-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
24841 // CHECK19-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
24842 // CHECK19-NEXT:    [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
24843 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
24844 // CHECK19-NEXT:    store i32 10, i32* [[M]], align 4
24845 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N]], align 4
24846 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
24847 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
24848 // CHECK19-NEXT:    [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
24849 // CHECK19-NEXT:    [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
24850 // CHECK19-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
24851 // CHECK19-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
24852 // CHECK19-NEXT:    store i32 [[TMP3]], i32* [[TMP7]], align 4
24853 // CHECK19-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
24854 // CHECK19-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
24855 // CHECK19-NEXT:    store i32 [[TMP3]], i32* [[TMP9]], align 4
24856 // CHECK19-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
24857 // CHECK19-NEXT:    store i64 4, i64* [[TMP10]], align 4
24858 // CHECK19-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
24859 // CHECK19-NEXT:    store i8* null, i8** [[TMP11]], align 4
24860 // CHECK19-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
24861 // CHECK19-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
24862 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP13]], align 4
24863 // CHECK19-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
24864 // CHECK19-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
24865 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP15]], align 4
24866 // CHECK19-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
24867 // CHECK19-NEXT:    store i64 4, i64* [[TMP16]], align 4
24868 // CHECK19-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
24869 // CHECK19-NEXT:    store i8* null, i8** [[TMP17]], align 4
24870 // CHECK19-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
24871 // CHECK19-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
24872 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 4
24873 // CHECK19-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
24874 // CHECK19-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
24875 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 4
24876 // CHECK19-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
24877 // CHECK19-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 4
24878 // CHECK19-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
24879 // CHECK19-NEXT:    store i8* null, i8** [[TMP23]], align 4
24880 // CHECK19-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
24881 // CHECK19-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
24882 // CHECK19-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
24883 // CHECK19-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
24884 // CHECK19-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
24885 // CHECK19-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24886 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
24887 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24888 // CHECK19-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24889 // CHECK19-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24890 // CHECK19-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24891 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
24892 // CHECK19-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
24893 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
24894 // CHECK19-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
24895 // CHECK19-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
24896 // CHECK19-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
24897 // CHECK19:       omp_offload.failed:
24898 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
24899 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT]]
24900 // CHECK19:       omp_offload.cont:
24901 // CHECK19-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
24902 // CHECK19-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
24903 // CHECK19-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
24904 // CHECK19-NEXT:    [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
24905 // CHECK19-NEXT:    [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
24906 // CHECK19-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
24907 // CHECK19-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
24908 // CHECK19-NEXT:    store i32 [[TMP34]], i32* [[TMP38]], align 4
24909 // CHECK19-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
24910 // CHECK19-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
24911 // CHECK19-NEXT:    store i32 [[TMP34]], i32* [[TMP40]], align 4
24912 // CHECK19-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
24913 // CHECK19-NEXT:    store i64 4, i64* [[TMP41]], align 4
24914 // CHECK19-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
24915 // CHECK19-NEXT:    store i8* null, i8** [[TMP42]], align 4
24916 // CHECK19-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
24917 // CHECK19-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
24918 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP44]], align 4
24919 // CHECK19-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
24920 // CHECK19-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
24921 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP46]], align 4
24922 // CHECK19-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
24923 // CHECK19-NEXT:    store i64 4, i64* [[TMP47]], align 4
24924 // CHECK19-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
24925 // CHECK19-NEXT:    store i8* null, i8** [[TMP48]], align 4
24926 // CHECK19-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
24927 // CHECK19-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
24928 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP50]], align 4
24929 // CHECK19-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
24930 // CHECK19-NEXT:    [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
24931 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP52]], align 4
24932 // CHECK19-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
24933 // CHECK19-NEXT:    store i64 [[TMP36]], i64* [[TMP53]], align 4
24934 // CHECK19-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
24935 // CHECK19-NEXT:    store i8* null, i8** [[TMP54]], align 4
24936 // CHECK19-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
24937 // CHECK19-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
24938 // CHECK19-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
24939 // CHECK19-NEXT:    [[TMP58:%.*]] = load i32, i32* [[N]], align 4
24940 // CHECK19-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
24941 // CHECK19-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
24942 // CHECK19-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
24943 // CHECK19-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
24944 // CHECK19-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
24945 // CHECK19-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
24946 // CHECK19-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
24947 // CHECK19-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
24948 // CHECK19-NEXT:    [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
24949 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
24950 // CHECK19-NEXT:    [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
24951 // CHECK19-NEXT:    [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
24952 // CHECK19-NEXT:    br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
24953 // CHECK19:       omp_offload.failed15:
24954 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
24955 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
24956 // CHECK19:       omp_offload.cont16:
24957 // CHECK19-NEXT:    [[TMP64:%.*]] = load i32, i32* [[M]], align 4
24958 // CHECK19-NEXT:    store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
24959 // CHECK19-NEXT:    [[TMP65:%.*]] = load i32, i32* [[N]], align 4
24960 // CHECK19-NEXT:    store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
24961 // CHECK19-NEXT:    [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
24962 // CHECK19-NEXT:    [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
24963 // CHECK19-NEXT:    store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
24964 // CHECK19-NEXT:    [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
24965 // CHECK19-NEXT:    [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
24966 // CHECK19-NEXT:    [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
24967 // CHECK19-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
24968 // CHECK19-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
24969 // CHECK19-NEXT:    store i32 [[TMP66]], i32* [[TMP72]], align 4
24970 // CHECK19-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
24971 // CHECK19-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
24972 // CHECK19-NEXT:    store i32 [[TMP66]], i32* [[TMP74]], align 4
24973 // CHECK19-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
24974 // CHECK19-NEXT:    store i64 4, i64* [[TMP75]], align 4
24975 // CHECK19-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
24976 // CHECK19-NEXT:    store i8* null, i8** [[TMP76]], align 4
24977 // CHECK19-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
24978 // CHECK19-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
24979 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP78]], align 4
24980 // CHECK19-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
24981 // CHECK19-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
24982 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP80]], align 4
24983 // CHECK19-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
24984 // CHECK19-NEXT:    store i64 4, i64* [[TMP81]], align 4
24985 // CHECK19-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
24986 // CHECK19-NEXT:    store i8* null, i8** [[TMP82]], align 4
24987 // CHECK19-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
24988 // CHECK19-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
24989 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 4
24990 // CHECK19-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
24991 // CHECK19-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
24992 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP86]], align 4
24993 // CHECK19-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
24994 // CHECK19-NEXT:    store i64 [[TMP70]], i64* [[TMP87]], align 4
24995 // CHECK19-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
24996 // CHECK19-NEXT:    store i8* null, i8** [[TMP88]], align 4
24997 // CHECK19-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
24998 // CHECK19-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
24999 // CHECK19-NEXT:    store i32 [[TMP68]], i32* [[TMP90]], align 4
25000 // CHECK19-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
25001 // CHECK19-NEXT:    [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
25002 // CHECK19-NEXT:    store i32 [[TMP68]], i32* [[TMP92]], align 4
25003 // CHECK19-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
25004 // CHECK19-NEXT:    store i64 4, i64* [[TMP93]], align 4
25005 // CHECK19-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
25006 // CHECK19-NEXT:    store i8* null, i8** [[TMP94]], align 4
25007 // CHECK19-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
25008 // CHECK19-NEXT:    [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
25009 // CHECK19-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
25010 // CHECK19-NEXT:    [[TMP98:%.*]] = load i32, i32* [[N]], align 4
25011 // CHECK19-NEXT:    store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
25012 // CHECK19-NEXT:    [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
25013 // CHECK19-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
25014 // CHECK19-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
25015 // CHECK19-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
25016 // CHECK19-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
25017 // CHECK19-NEXT:    [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
25018 // CHECK19-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
25019 // CHECK19-NEXT:    [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
25020 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
25021 // CHECK19-NEXT:    [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
25022 // CHECK19-NEXT:    [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
25023 // CHECK19-NEXT:    br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
25024 // CHECK19:       omp_offload.failed30:
25025 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
25026 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
25027 // CHECK19:       omp_offload.cont31:
25028 // CHECK19-NEXT:    [[TMP104:%.*]] = load i32, i32* [[N]], align 4
25029 // CHECK19-NEXT:    store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
25030 // CHECK19-NEXT:    [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
25031 // CHECK19-NEXT:    [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
25032 // CHECK19-NEXT:    [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
25033 // CHECK19-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
25034 // CHECK19-NEXT:    [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
25035 // CHECK19-NEXT:    store i32 [[TMP105]], i32* [[TMP109]], align 4
25036 // CHECK19-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
25037 // CHECK19-NEXT:    [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
25038 // CHECK19-NEXT:    store i32 [[TMP105]], i32* [[TMP111]], align 4
25039 // CHECK19-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
25040 // CHECK19-NEXT:    store i64 4, i64* [[TMP112]], align 4
25041 // CHECK19-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
25042 // CHECK19-NEXT:    store i8* null, i8** [[TMP113]], align 4
25043 // CHECK19-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
25044 // CHECK19-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
25045 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP115]], align 4
25046 // CHECK19-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
25047 // CHECK19-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
25048 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP117]], align 4
25049 // CHECK19-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
25050 // CHECK19-NEXT:    store i64 4, i64* [[TMP118]], align 4
25051 // CHECK19-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
25052 // CHECK19-NEXT:    store i8* null, i8** [[TMP119]], align 4
25053 // CHECK19-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
25054 // CHECK19-NEXT:    [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
25055 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP121]], align 4
25056 // CHECK19-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
25057 // CHECK19-NEXT:    [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
25058 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP123]], align 4
25059 // CHECK19-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
25060 // CHECK19-NEXT:    store i64 [[TMP107]], i64* [[TMP124]], align 4
25061 // CHECK19-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
25062 // CHECK19-NEXT:    store i8* null, i8** [[TMP125]], align 4
25063 // CHECK19-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
25064 // CHECK19-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
25065 // CHECK19-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
25066 // CHECK19-NEXT:    [[TMP129:%.*]] = load i32, i32* [[N]], align 4
25067 // CHECK19-NEXT:    store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
25068 // CHECK19-NEXT:    [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
25069 // CHECK19-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
25070 // CHECK19-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
25071 // CHECK19-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
25072 // CHECK19-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
25073 // CHECK19-NEXT:    [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
25074 // CHECK19-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
25075 // CHECK19-NEXT:    [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
25076 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
25077 // CHECK19-NEXT:    [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
25078 // CHECK19-NEXT:    [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
25079 // CHECK19-NEXT:    br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
25080 // CHECK19:       omp_offload.failed44:
25081 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
25082 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
25083 // CHECK19:       omp_offload.cont45:
25084 // CHECK19-NEXT:    [[TMP135:%.*]] = load i32, i32* [[M]], align 4
25085 // CHECK19-NEXT:    store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
25086 // CHECK19-NEXT:    [[TMP136:%.*]] = load i32, i32* [[N]], align 4
25087 // CHECK19-NEXT:    store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
25088 // CHECK19-NEXT:    [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
25089 // CHECK19-NEXT:    [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
25090 // CHECK19-NEXT:    store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
25091 // CHECK19-NEXT:    [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
25092 // CHECK19-NEXT:    [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
25093 // CHECK19-NEXT:    [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
25094 // CHECK19-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
25095 // CHECK19-NEXT:    [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
25096 // CHECK19-NEXT:    store i32 [[TMP137]], i32* [[TMP143]], align 4
25097 // CHECK19-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
25098 // CHECK19-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
25099 // CHECK19-NEXT:    store i32 [[TMP137]], i32* [[TMP145]], align 4
25100 // CHECK19-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
25101 // CHECK19-NEXT:    store i64 4, i64* [[TMP146]], align 4
25102 // CHECK19-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
25103 // CHECK19-NEXT:    store i8* null, i8** [[TMP147]], align 4
25104 // CHECK19-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
25105 // CHECK19-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
25106 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP149]], align 4
25107 // CHECK19-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
25108 // CHECK19-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
25109 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[TMP151]], align 4
25110 // CHECK19-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
25111 // CHECK19-NEXT:    store i64 4, i64* [[TMP152]], align 4
25112 // CHECK19-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
25113 // CHECK19-NEXT:    store i8* null, i8** [[TMP153]], align 4
25114 // CHECK19-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
25115 // CHECK19-NEXT:    [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
25116 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP155]], align 4
25117 // CHECK19-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
25118 // CHECK19-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
25119 // CHECK19-NEXT:    store i32* [[VLA]], i32** [[TMP157]], align 4
25120 // CHECK19-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
25121 // CHECK19-NEXT:    store i64 [[TMP141]], i64* [[TMP158]], align 4
25122 // CHECK19-NEXT:    [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
25123 // CHECK19-NEXT:    store i8* null, i8** [[TMP159]], align 4
25124 // CHECK19-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
25125 // CHECK19-NEXT:    [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
25126 // CHECK19-NEXT:    store i32 [[TMP139]], i32* [[TMP161]], align 4
25127 // CHECK19-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
25128 // CHECK19-NEXT:    [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
25129 // CHECK19-NEXT:    store i32 [[TMP139]], i32* [[TMP163]], align 4
25130 // CHECK19-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
25131 // CHECK19-NEXT:    store i64 4, i64* [[TMP164]], align 4
25132 // CHECK19-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
25133 // CHECK19-NEXT:    store i8* null, i8** [[TMP165]], align 4
25134 // CHECK19-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
25135 // CHECK19-NEXT:    [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
25136 // CHECK19-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
25137 // CHECK19-NEXT:    [[TMP169:%.*]] = load i32, i32* [[N]], align 4
25138 // CHECK19-NEXT:    store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
25139 // CHECK19-NEXT:    [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
25140 // CHECK19-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
25141 // CHECK19-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
25142 // CHECK19-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
25143 // CHECK19-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
25144 // CHECK19-NEXT:    [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
25145 // CHECK19-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
25146 // CHECK19-NEXT:    [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
25147 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
25148 // CHECK19-NEXT:    [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
25149 // CHECK19-NEXT:    [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
25150 // CHECK19-NEXT:    br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
25151 // CHECK19:       omp_offload.failed60:
25152 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
25153 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
25154 // CHECK19:       omp_offload.cont61:
25155 // CHECK19-NEXT:    [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
25156 // CHECK19-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
25157 // CHECK19-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
25158 // CHECK19-NEXT:    [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
25159 // CHECK19-NEXT:    call void @llvm.stackrestore(i8* [[TMP176]])
25160 // CHECK19-NEXT:    [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
25161 // CHECK19-NEXT:    ret i32 [[TMP177]]
25162 //
25163 //
25164 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
25165 // CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
25166 // CHECK19-NEXT:  entry:
25167 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25168 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25169 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25170 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25171 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25172 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25173 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25174 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25175 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25176 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25177 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
25178 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
25179 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
25180 // CHECK19-NEXT:    ret void
25181 //
25182 //
25183 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined.
25184 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
25185 // CHECK19-NEXT:  entry:
25186 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25187 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25188 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25189 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25190 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25191 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25192 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25193 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25194 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25195 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
25196 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25197 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25198 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25199 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25200 // CHECK19-NEXT:    [[I3:%.*]] = alloca i32, align 4
25201 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25202 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25203 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25204 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25205 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25206 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25207 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25208 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25209 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25210 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
25211 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25212 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
25213 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25214 // CHECK19-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25215 // CHECK19-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25216 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
25217 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25218 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
25219 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25220 // CHECK19:       omp.precond.then:
25221 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25222 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25223 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
25224 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25225 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25226 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25227 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
25228 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25229 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25230 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25231 // CHECK19-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
25232 // CHECK19-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25233 // CHECK19:       cond.true:
25234 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25235 // CHECK19-NEXT:    br label [[COND_END:%.*]]
25236 // CHECK19:       cond.false:
25237 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25238 // CHECK19-NEXT:    br label [[COND_END]]
25239 // CHECK19:       cond.end:
25240 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
25241 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25242 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25243 // CHECK19-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
25244 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25245 // CHECK19:       omp.inner.for.cond:
25246 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
25247 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
25248 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
25249 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25250 // CHECK19:       omp.inner.for.body:
25251 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14
25252 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
25253 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !14
25254 // CHECK19-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !14
25255 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !14
25256 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !14
25257 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25258 // CHECK19:       omp.inner.for.inc:
25259 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
25260 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14
25261 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
25262 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
25263 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
25264 // CHECK19:       omp.inner.for.end:
25265 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25266 // CHECK19:       omp.loop.exit:
25267 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25268 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
25269 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
25270 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25271 // CHECK19-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
25272 // CHECK19-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25273 // CHECK19:       .omp.final.then:
25274 // CHECK19-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25275 // CHECK19-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
25276 // CHECK19-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
25277 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
25278 // CHECK19-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
25279 // CHECK19-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
25280 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25281 // CHECK19:       .omp.final.done:
25282 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
25283 // CHECK19:       omp.precond.end:
25284 // CHECK19-NEXT:    ret void
25285 //
25286 //
25287 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1
25288 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
25289 // CHECK19-NEXT:  entry:
25290 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25291 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25292 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
25293 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
25294 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25295 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25296 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25297 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25298 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25299 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25300 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25301 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
25302 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25303 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25304 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25305 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25306 // CHECK19-NEXT:    [[I3:%.*]] = alloca i32, align 4
25307 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25308 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25309 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25310 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25311 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25312 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25313 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25314 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25315 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25316 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25317 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
25318 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25319 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
25320 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25321 // CHECK19-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25322 // CHECK19-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25323 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
25324 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25325 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
25326 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25327 // CHECK19:       omp.precond.then:
25328 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25329 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25330 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
25331 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25332 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25333 // CHECK19-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
25334 // CHECK19-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
25335 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25336 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25337 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25338 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25339 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25340 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25341 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25342 // CHECK19-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
25343 // CHECK19-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25344 // CHECK19:       cond.true:
25345 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25346 // CHECK19-NEXT:    br label [[COND_END:%.*]]
25347 // CHECK19:       cond.false:
25348 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25349 // CHECK19-NEXT:    br label [[COND_END]]
25350 // CHECK19:       cond.end:
25351 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
25352 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25353 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25354 // CHECK19-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
25355 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25356 // CHECK19:       omp.inner.for.cond:
25357 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
25358 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
25359 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25360 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25361 // CHECK19:       omp.inner.for.body:
25362 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
25363 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
25364 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25365 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !18
25366 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !18
25367 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
25368 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
25369 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25370 // CHECK19:       omp.body.continue:
25371 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25372 // CHECK19:       omp.inner.for.inc:
25373 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
25374 // CHECK19-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
25375 // CHECK19-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
25376 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
25377 // CHECK19:       omp.inner.for.end:
25378 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25379 // CHECK19:       omp.loop.exit:
25380 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25381 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
25382 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
25383 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25384 // CHECK19-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
25385 // CHECK19-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25386 // CHECK19:       .omp.final.then:
25387 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25388 // CHECK19-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
25389 // CHECK19-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
25390 // CHECK19-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
25391 // CHECK19-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
25392 // CHECK19-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
25393 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25394 // CHECK19:       .omp.final.done:
25395 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
25396 // CHECK19:       omp.precond.end:
25397 // CHECK19-NEXT:    ret void
25398 //
25399 //
25400 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
25401 // CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
25402 // CHECK19-NEXT:  entry:
25403 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25404 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25405 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25406 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25407 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25408 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25409 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25410 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25411 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25412 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25413 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
25414 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
25415 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
25416 // CHECK19-NEXT:    ret void
25417 //
25418 //
25419 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2
25420 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
25421 // CHECK19-NEXT:  entry:
25422 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25423 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25424 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25425 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25426 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25427 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25428 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25429 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25430 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25431 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
25432 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25433 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25434 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25435 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25436 // CHECK19-NEXT:    [[I3:%.*]] = alloca i32, align 4
25437 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25438 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25439 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25440 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25441 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25442 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25443 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25444 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25445 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25446 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
25447 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25448 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
25449 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25450 // CHECK19-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25451 // CHECK19-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25452 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
25453 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25454 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
25455 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25456 // CHECK19:       omp.precond.then:
25457 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25458 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25459 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
25460 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25461 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25462 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25463 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
25464 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25465 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25466 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25467 // CHECK19-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
25468 // CHECK19-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25469 // CHECK19:       cond.true:
25470 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25471 // CHECK19-NEXT:    br label [[COND_END:%.*]]
25472 // CHECK19:       cond.false:
25473 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25474 // CHECK19-NEXT:    br label [[COND_END]]
25475 // CHECK19:       cond.end:
25476 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
25477 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25478 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25479 // CHECK19-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
25480 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25481 // CHECK19:       omp.inner.for.cond:
25482 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
25483 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
25484 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
25485 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25486 // CHECK19:       omp.inner.for.body:
25487 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
25488 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
25489 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !23
25490 // CHECK19-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !23
25491 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !23
25492 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !23
25493 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25494 // CHECK19:       omp.inner.for.inc:
25495 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
25496 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
25497 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
25498 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
25499 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
25500 // CHECK19:       omp.inner.for.end:
25501 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25502 // CHECK19:       omp.loop.exit:
25503 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25504 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
25505 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
25506 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25507 // CHECK19-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
25508 // CHECK19-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25509 // CHECK19:       .omp.final.then:
25510 // CHECK19-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25511 // CHECK19-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
25512 // CHECK19-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
25513 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
25514 // CHECK19-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
25515 // CHECK19-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
25516 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25517 // CHECK19:       .omp.final.done:
25518 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
25519 // CHECK19:       omp.precond.end:
25520 // CHECK19-NEXT:    ret void
25521 //
25522 //
25523 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..3
25524 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
25525 // CHECK19-NEXT:  entry:
25526 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25527 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25528 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
25529 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
25530 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25531 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25532 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25533 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25534 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25535 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25536 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25537 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
25538 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25539 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25540 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25541 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25542 // CHECK19-NEXT:    [[I3:%.*]] = alloca i32, align 4
25543 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25544 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25545 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25546 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25547 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25548 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25549 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25550 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25551 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25552 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25553 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
25554 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25555 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
25556 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25557 // CHECK19-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25558 // CHECK19-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25559 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
25560 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25561 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
25562 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25563 // CHECK19:       omp.precond.then:
25564 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25565 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25566 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
25567 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25568 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25569 // CHECK19-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
25570 // CHECK19-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
25571 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25572 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25573 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25574 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25575 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25576 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25577 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25578 // CHECK19-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
25579 // CHECK19-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25580 // CHECK19:       cond.true:
25581 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25582 // CHECK19-NEXT:    br label [[COND_END:%.*]]
25583 // CHECK19:       cond.false:
25584 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25585 // CHECK19-NEXT:    br label [[COND_END]]
25586 // CHECK19:       cond.end:
25587 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
25588 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25589 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25590 // CHECK19-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
25591 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25592 // CHECK19:       omp.inner.for.cond:
25593 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
25594 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
25595 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25596 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25597 // CHECK19:       omp.inner.for.body:
25598 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
25599 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
25600 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25601 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !26
25602 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !26
25603 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
25604 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
25605 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25606 // CHECK19:       omp.body.continue:
25607 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25608 // CHECK19:       omp.inner.for.inc:
25609 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
25610 // CHECK19-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
25611 // CHECK19-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
25612 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
25613 // CHECK19:       omp.inner.for.end:
25614 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25615 // CHECK19:       omp.loop.exit:
25616 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25617 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
25618 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
25619 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25620 // CHECK19-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
25621 // CHECK19-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25622 // CHECK19:       .omp.final.then:
25623 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25624 // CHECK19-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
25625 // CHECK19-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
25626 // CHECK19-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
25627 // CHECK19-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
25628 // CHECK19-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
25629 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25630 // CHECK19:       .omp.final.done:
25631 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
25632 // CHECK19:       omp.precond.end:
25633 // CHECK19-NEXT:    ret void
25634 //
25635 //
25636 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
25637 // CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
25638 // CHECK19-NEXT:  entry:
25639 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25640 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25641 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25642 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
25643 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25644 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
25645 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25646 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25647 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25648 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
25649 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25650 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25651 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25652 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
25653 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
25654 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
25655 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
25656 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
25657 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
25658 // CHECK19-NEXT:    ret void
25659 //
25660 //
25661 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5
25662 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
25663 // CHECK19-NEXT:  entry:
25664 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25665 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25666 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25667 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25668 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25669 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
25670 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25671 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25672 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25673 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
25674 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
25675 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25676 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25677 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25678 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25679 // CHECK19-NEXT:    [[I4:%.*]] = alloca i32, align 4
25680 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25681 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
25682 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25683 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25684 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25685 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25686 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25687 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
25688 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25689 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25690 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25691 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25692 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25693 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
25694 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25695 // CHECK19-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
25696 // CHECK19-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
25697 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
25698 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25699 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
25700 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25701 // CHECK19:       omp.precond.then:
25702 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25703 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25704 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
25705 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25706 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25707 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
25708 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25709 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
25710 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
25711 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25712 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25713 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
25714 // CHECK19-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25715 // CHECK19:       cond.true:
25716 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25717 // CHECK19-NEXT:    br label [[COND_END:%.*]]
25718 // CHECK19:       cond.false:
25719 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25720 // CHECK19-NEXT:    br label [[COND_END]]
25721 // CHECK19:       cond.end:
25722 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
25723 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25724 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25725 // CHECK19-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
25726 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25727 // CHECK19:       omp.inner.for.cond:
25728 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
25729 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
25730 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
25731 // CHECK19-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
25732 // CHECK19-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25733 // CHECK19:       omp.inner.for.body:
25734 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
25735 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
25736 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !29
25737 // CHECK19-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4, !llvm.access.group !29
25738 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !29
25739 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29
25740 // CHECK19-NEXT:    store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
25741 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
25742 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29
25743 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25744 // CHECK19:       omp.inner.for.inc:
25745 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
25746 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
25747 // CHECK19-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
25748 // CHECK19-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
25749 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
25750 // CHECK19-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
25751 // CHECK19-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
25752 // CHECK19-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
25753 // CHECK19-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
25754 // CHECK19-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
25755 // CHECK19-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
25756 // CHECK19-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
25757 // CHECK19-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
25758 // CHECK19-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
25759 // CHECK19-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
25760 // CHECK19-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
25761 // CHECK19:       cond.true11:
25762 // CHECK19-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
25763 // CHECK19-NEXT:    br label [[COND_END13:%.*]]
25764 // CHECK19:       cond.false12:
25765 // CHECK19-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
25766 // CHECK19-NEXT:    br label [[COND_END13]]
25767 // CHECK19:       cond.end13:
25768 // CHECK19-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
25769 // CHECK19-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
25770 // CHECK19-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
25771 // CHECK19-NEXT:    store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
25772 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
25773 // CHECK19:       omp.inner.for.end:
25774 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25775 // CHECK19:       omp.loop.exit:
25776 // CHECK19-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25777 // CHECK19-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
25778 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
25779 // CHECK19-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25780 // CHECK19-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
25781 // CHECK19-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25782 // CHECK19:       .omp.final.then:
25783 // CHECK19-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25784 // CHECK19-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP37]], 0
25785 // CHECK19-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
25786 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
25787 // CHECK19-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
25788 // CHECK19-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
25789 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25790 // CHECK19:       .omp.final.done:
25791 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
25792 // CHECK19:       omp.precond.end:
25793 // CHECK19-NEXT:    ret void
25794 //
25795 //
25796 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6
25797 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
25798 // CHECK19-NEXT:  entry:
25799 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25800 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25801 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
25802 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
25803 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25804 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25805 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25806 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
25807 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25808 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25809 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25810 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
25811 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
25812 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25813 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25814 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25815 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25816 // CHECK19-NEXT:    [[I4:%.*]] = alloca i32, align 4
25817 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25818 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25819 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25820 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25821 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25822 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25823 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25824 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
25825 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25826 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25827 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25828 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25829 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25830 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
25831 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25832 // CHECK19-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
25833 // CHECK19-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
25834 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
25835 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25836 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
25837 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25838 // CHECK19:       omp.precond.then:
25839 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25840 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25841 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
25842 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25843 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25844 // CHECK19-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
25845 // CHECK19-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
25846 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25847 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25848 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25849 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25850 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25851 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25852 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25853 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
25854 // CHECK19-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25855 // CHECK19:       cond.true:
25856 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25857 // CHECK19-NEXT:    br label [[COND_END:%.*]]
25858 // CHECK19:       cond.false:
25859 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25860 // CHECK19-NEXT:    br label [[COND_END]]
25861 // CHECK19:       cond.end:
25862 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
25863 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25864 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25865 // CHECK19-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
25866 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25867 // CHECK19:       omp.inner.for.cond:
25868 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
25869 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
25870 // CHECK19-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25871 // CHECK19-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25872 // CHECK19:       omp.inner.for.body:
25873 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
25874 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
25875 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25876 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !32
25877 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !32
25878 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
25879 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
25880 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25881 // CHECK19:       omp.body.continue:
25882 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25883 // CHECK19:       omp.inner.for.inc:
25884 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
25885 // CHECK19-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
25886 // CHECK19-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
25887 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
25888 // CHECK19:       omp.inner.for.end:
25889 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25890 // CHECK19:       omp.loop.exit:
25891 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25892 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
25893 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
25894 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25895 // CHECK19-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
25896 // CHECK19-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25897 // CHECK19:       .omp.final.then:
25898 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25899 // CHECK19-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP24]], 0
25900 // CHECK19-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
25901 // CHECK19-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
25902 // CHECK19-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
25903 // CHECK19-NEXT:    store i32 [[ADD11]], i32* [[I4]], align 4
25904 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25905 // CHECK19:       .omp.final.done:
25906 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
25907 // CHECK19:       omp.precond.end:
25908 // CHECK19-NEXT:    ret void
25909 //
25910 //
25911 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
25912 // CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
25913 // CHECK19-NEXT:  entry:
25914 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25915 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25916 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25917 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25918 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25919 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25920 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25921 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25922 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25923 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25924 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
25925 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
25926 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
25927 // CHECK19-NEXT:    ret void
25928 //
25929 //
25930 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..8
25931 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
25932 // CHECK19-NEXT:  entry:
25933 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25934 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25935 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25936 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25937 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25938 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25939 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25940 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25941 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25942 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
25943 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25944 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25945 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25946 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25947 // CHECK19-NEXT:    [[I3:%.*]] = alloca i32, align 4
25948 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25949 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25950 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25951 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25952 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25953 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25954 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25955 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
25956 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25957 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
25958 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25959 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
25960 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25961 // CHECK19-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25962 // CHECK19-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25963 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
25964 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25965 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
25966 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25967 // CHECK19:       omp.precond.then:
25968 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25969 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25970 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
25971 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25972 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25973 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25974 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
25975 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25976 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25977 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25978 // CHECK19-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
25979 // CHECK19-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25980 // CHECK19:       cond.true:
25981 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25982 // CHECK19-NEXT:    br label [[COND_END:%.*]]
25983 // CHECK19:       cond.false:
25984 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25985 // CHECK19-NEXT:    br label [[COND_END]]
25986 // CHECK19:       cond.end:
25987 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
25988 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25989 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25990 // CHECK19-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
25991 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25992 // CHECK19:       omp.inner.for.cond:
25993 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
25994 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
25995 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
25996 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25997 // CHECK19:       omp.inner.for.body:
25998 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
25999 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
26000 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35
26001 // CHECK19-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35
26002 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35
26003 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35
26004 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26005 // CHECK19:       omp.inner.for.inc:
26006 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
26007 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
26008 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
26009 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
26010 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
26011 // CHECK19:       omp.inner.for.end:
26012 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26013 // CHECK19:       omp.loop.exit:
26014 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26015 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
26016 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
26017 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26018 // CHECK19-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
26019 // CHECK19-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26020 // CHECK19:       .omp.final.then:
26021 // CHECK19-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26022 // CHECK19-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
26023 // CHECK19-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
26024 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
26025 // CHECK19-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
26026 // CHECK19-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
26027 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26028 // CHECK19:       .omp.final.done:
26029 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
26030 // CHECK19:       omp.precond.end:
26031 // CHECK19-NEXT:    ret void
26032 //
26033 //
26034 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9
26035 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
26036 // CHECK19-NEXT:  entry:
26037 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26038 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26039 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26040 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26041 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26042 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
26043 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26044 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26045 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26046 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26047 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26048 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26049 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26050 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26051 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26052 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26053 // CHECK19-NEXT:    [[I3:%.*]] = alloca i32, align 4
26054 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26055 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26056 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26057 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26058 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26059 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
26060 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26061 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
26062 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
26063 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26064 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
26065 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26066 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
26067 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26068 // CHECK19-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26069 // CHECK19-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26070 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
26071 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26072 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
26073 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26074 // CHECK19:       omp.precond.then:
26075 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26076 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26077 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
26078 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26079 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26080 // CHECK19-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
26081 // CHECK19-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26082 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26083 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26084 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26085 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26086 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26087 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26088 // CHECK19-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
26089 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
26090 // CHECK19:       omp.dispatch.cond:
26091 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26092 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
26093 // CHECK19-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
26094 // CHECK19-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
26095 // CHECK19-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
26096 // CHECK19:       omp.dispatch.body:
26097 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26098 // CHECK19-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
26099 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26100 // CHECK19:       omp.inner.for.cond:
26101 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
26102 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
26103 // CHECK19-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
26104 // CHECK19-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26105 // CHECK19:       omp.inner.for.body:
26106 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
26107 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
26108 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26109 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !38
26110 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !38
26111 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
26112 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
26113 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26114 // CHECK19:       omp.body.continue:
26115 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26116 // CHECK19:       omp.inner.for.inc:
26117 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
26118 // CHECK19-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
26119 // CHECK19-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
26120 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
26121 // CHECK19:       omp.inner.for.end:
26122 // CHECK19-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
26123 // CHECK19:       omp.dispatch.inc:
26124 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND]]
26125 // CHECK19:       omp.dispatch.end:
26126 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26127 // CHECK19-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
26128 // CHECK19-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26129 // CHECK19:       .omp.final.then:
26130 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26131 // CHECK19-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP23]], 0
26132 // CHECK19-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
26133 // CHECK19-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
26134 // CHECK19-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
26135 // CHECK19-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
26136 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26137 // CHECK19:       .omp.final.done:
26138 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
26139 // CHECK19:       omp.precond.end:
26140 // CHECK19-NEXT:    ret void
26141 //
26142 //
26143 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
26144 // CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
26145 // CHECK19-NEXT:  entry:
26146 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26147 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
26148 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26149 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26150 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
26151 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
26152 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26153 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
26154 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26155 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26156 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
26157 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
26158 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26159 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
26160 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
26161 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26162 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
26163 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
26164 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
26165 // CHECK19-NEXT:    ret void
26166 //
26167 //
26168 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11
26169 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
26170 // CHECK19-NEXT:  entry:
26171 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26172 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26173 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26174 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
26175 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26176 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26177 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26178 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26179 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26180 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26181 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26182 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26183 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26184 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26185 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26186 // CHECK19-NEXT:    [[I4:%.*]] = alloca i32, align 4
26187 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
26188 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
26189 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26190 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26191 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26192 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
26193 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26194 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26195 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
26196 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
26197 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26198 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26199 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26200 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
26201 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26202 // CHECK19-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
26203 // CHECK19-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26204 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
26205 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26206 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
26207 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26208 // CHECK19:       omp.precond.then:
26209 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26210 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26211 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
26212 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26213 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26214 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26215 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
26216 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26217 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26218 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26219 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
26220 // CHECK19-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26221 // CHECK19:       cond.true:
26222 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26223 // CHECK19-NEXT:    br label [[COND_END:%.*]]
26224 // CHECK19:       cond.false:
26225 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26226 // CHECK19-NEXT:    br label [[COND_END]]
26227 // CHECK19:       cond.end:
26228 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
26229 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26230 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26231 // CHECK19-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
26232 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26233 // CHECK19:       omp.inner.for.cond:
26234 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
26235 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
26236 // CHECK19-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
26237 // CHECK19-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26238 // CHECK19:       omp.inner.for.body:
26239 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !41
26240 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
26241 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !41
26242 // CHECK19-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !41
26243 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !41
26244 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41
26245 // CHECK19-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
26246 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
26247 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41
26248 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26249 // CHECK19:       omp.inner.for.inc:
26250 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
26251 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !41
26252 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
26253 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
26254 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
26255 // CHECK19:       omp.inner.for.end:
26256 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26257 // CHECK19:       omp.loop.exit:
26258 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26259 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
26260 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
26261 // CHECK19-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26262 // CHECK19-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
26263 // CHECK19-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26264 // CHECK19:       .omp.final.then:
26265 // CHECK19-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26266 // CHECK19-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
26267 // CHECK19-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
26268 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
26269 // CHECK19-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
26270 // CHECK19-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
26271 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26272 // CHECK19:       .omp.final.done:
26273 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
26274 // CHECK19:       omp.precond.end:
26275 // CHECK19-NEXT:    ret void
26276 //
26277 //
26278 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12
26279 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
26280 // CHECK19-NEXT:  entry:
26281 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26282 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26283 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26284 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26285 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26286 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
26287 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26288 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26289 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26290 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26291 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26292 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26293 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26294 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26295 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26296 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26297 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26298 // CHECK19-NEXT:    [[I4:%.*]] = alloca i32, align 4
26299 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26300 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26301 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26302 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26303 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26304 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
26305 // CHECK19-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26306 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26307 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
26308 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
26309 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26310 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26311 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26312 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
26313 // CHECK19-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26314 // CHECK19-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
26315 // CHECK19-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26316 // CHECK19-NEXT:    store i32 0, i32* [[I]], align 4
26317 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26318 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
26319 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26320 // CHECK19:       omp.precond.then:
26321 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26322 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26323 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
26324 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26325 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26326 // CHECK19-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
26327 // CHECK19-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26328 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26329 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26330 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26331 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26332 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26333 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26334 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
26335 // CHECK19-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
26336 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
26337 // CHECK19:       omp.dispatch.cond:
26338 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26339 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
26340 // CHECK19-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
26341 // CHECK19-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
26342 // CHECK19-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
26343 // CHECK19:       omp.dispatch.body:
26344 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26345 // CHECK19-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26346 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26347 // CHECK19:       omp.inner.for.cond:
26348 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
26349 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
26350 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
26351 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26352 // CHECK19:       omp.inner.for.body:
26353 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
26354 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
26355 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26356 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !44
26357 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !44
26358 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
26359 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
26360 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26361 // CHECK19:       omp.body.continue:
26362 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26363 // CHECK19:       omp.inner.for.inc:
26364 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
26365 // CHECK19-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
26366 // CHECK19-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
26367 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
26368 // CHECK19:       omp.inner.for.end:
26369 // CHECK19-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
26370 // CHECK19:       omp.dispatch.inc:
26371 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND]]
26372 // CHECK19:       omp.dispatch.end:
26373 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26374 // CHECK19-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
26375 // CHECK19-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26376 // CHECK19:       .omp.final.then:
26377 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26378 // CHECK19-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
26379 // CHECK19-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
26380 // CHECK19-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
26381 // CHECK19-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
26382 // CHECK19-NEXT:    store i32 [[ADD10]], i32* [[I4]], align 4
26383 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26384 // CHECK19:       .omp.final.done:
26385 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
26386 // CHECK19:       omp.precond.end:
26387 // CHECK19-NEXT:    ret void
26388 //
26389 //
26390 // CHECK19-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
26391 // CHECK19-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
26392 // CHECK19-NEXT:  entry:
26393 // CHECK19-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
26394 // CHECK19-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
26395 // CHECK19-NEXT:    [[M:%.*]] = alloca i32, align 4
26396 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
26397 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
26398 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
26399 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26400 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
26401 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
26402 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
26403 // CHECK19-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
26404 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26405 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
26406 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
26407 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
26408 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
26409 // CHECK19-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
26410 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
26411 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
26412 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
26413 // CHECK19-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
26414 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
26415 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
26416 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
26417 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
26418 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
26419 // CHECK19-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
26420 // CHECK19-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
26421 // CHECK19-NEXT:    store i32 10, i32* [[M]], align 4
26422 // CHECK19-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
26423 // CHECK19-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
26424 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
26425 // CHECK19-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
26426 // CHECK19-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
26427 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
26428 // CHECK19-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
26429 // CHECK19-NEXT:    store i8* null, i8** [[TMP4]], align 4
26430 // CHECK19-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
26431 // CHECK19-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
26432 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
26433 // CHECK19-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
26434 // CHECK19-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
26435 // CHECK19-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
26436 // CHECK19:       omp_offload.failed:
26437 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
26438 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT]]
26439 // CHECK19:       omp_offload.cont:
26440 // CHECK19-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
26441 // CHECK19-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
26442 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
26443 // CHECK19-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
26444 // CHECK19-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
26445 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
26446 // CHECK19-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
26447 // CHECK19-NEXT:    store i8* null, i8** [[TMP13]], align 4
26448 // CHECK19-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
26449 // CHECK19-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
26450 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
26451 // CHECK19-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
26452 // CHECK19-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
26453 // CHECK19-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
26454 // CHECK19:       omp_offload.failed5:
26455 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
26456 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
26457 // CHECK19:       omp_offload.cont6:
26458 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
26459 // CHECK19-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
26460 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26461 // CHECK19-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
26462 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
26463 // CHECK19-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
26464 // CHECK19-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
26465 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
26466 // CHECK19-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
26467 // CHECK19-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
26468 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
26469 // CHECK19-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
26470 // CHECK19-NEXT:    store i8* null, i8** [[TMP25]], align 4
26471 // CHECK19-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
26472 // CHECK19-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
26473 // CHECK19-NEXT:    store i32 [[TMP20]], i32* [[TMP27]], align 4
26474 // CHECK19-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
26475 // CHECK19-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
26476 // CHECK19-NEXT:    store i32 [[TMP20]], i32* [[TMP29]], align 4
26477 // CHECK19-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
26478 // CHECK19-NEXT:    store i8* null, i8** [[TMP30]], align 4
26479 // CHECK19-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
26480 // CHECK19-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
26481 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
26482 // CHECK19-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
26483 // CHECK19-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
26484 // CHECK19-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
26485 // CHECK19:       omp_offload.failed11:
26486 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
26487 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
26488 // CHECK19:       omp_offload.cont12:
26489 // CHECK19-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
26490 // CHECK19-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
26491 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
26492 // CHECK19-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
26493 // CHECK19-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
26494 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
26495 // CHECK19-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
26496 // CHECK19-NEXT:    store i8* null, i8** [[TMP39]], align 4
26497 // CHECK19-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
26498 // CHECK19-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
26499 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
26500 // CHECK19-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
26501 // CHECK19-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
26502 // CHECK19-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
26503 // CHECK19:       omp_offload.failed17:
26504 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
26505 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
26506 // CHECK19:       omp_offload.cont18:
26507 // CHECK19-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
26508 // CHECK19-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
26509 // CHECK19-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
26510 // CHECK19-NEXT:    store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
26511 // CHECK19-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
26512 // CHECK19-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
26513 // CHECK19-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
26514 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
26515 // CHECK19-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
26516 // CHECK19-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
26517 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
26518 // CHECK19-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
26519 // CHECK19-NEXT:    store i8* null, i8** [[TMP51]], align 4
26520 // CHECK19-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
26521 // CHECK19-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
26522 // CHECK19-NEXT:    store i32 [[TMP46]], i32* [[TMP53]], align 4
26523 // CHECK19-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
26524 // CHECK19-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
26525 // CHECK19-NEXT:    store i32 [[TMP46]], i32* [[TMP55]], align 4
26526 // CHECK19-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
26527 // CHECK19-NEXT:    store i8* null, i8** [[TMP56]], align 4
26528 // CHECK19-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
26529 // CHECK19-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
26530 // CHECK19-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
26531 // CHECK19-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
26532 // CHECK19-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
26533 // CHECK19-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
26534 // CHECK19:       omp_offload.failed25:
26535 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
26536 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
26537 // CHECK19:       omp_offload.cont26:
26538 // CHECK19-NEXT:    ret i32 0
26539 //
26540 //
26541 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
26542 // CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
26543 // CHECK19-NEXT:  entry:
26544 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26545 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26546 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26547 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
26548 // CHECK19-NEXT:    ret void
26549 //
26550 //
26551 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14
26552 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
26553 // CHECK19-NEXT:  entry:
26554 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26555 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26556 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26557 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26558 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26559 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26560 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26561 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26562 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26563 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26564 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26565 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26566 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26567 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26568 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26569 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
26570 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26571 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26572 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26573 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
26574 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26575 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26576 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
26577 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26578 // CHECK19:       cond.true:
26579 // CHECK19-NEXT:    br label [[COND_END:%.*]]
26580 // CHECK19:       cond.false:
26581 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26582 // CHECK19-NEXT:    br label [[COND_END]]
26583 // CHECK19:       cond.end:
26584 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
26585 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26586 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26587 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
26588 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26589 // CHECK19:       omp.inner.for.cond:
26590 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
26591 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
26592 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
26593 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26594 // CHECK19:       omp.inner.for.body:
26595 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47
26596 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
26597 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47
26598 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26599 // CHECK19:       omp.inner.for.inc:
26600 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
26601 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !47
26602 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
26603 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
26604 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
26605 // CHECK19:       omp.inner.for.end:
26606 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26607 // CHECK19:       omp.loop.exit:
26608 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
26609 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26610 // CHECK19-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
26611 // CHECK19-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26612 // CHECK19:       .omp.final.then:
26613 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
26614 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26615 // CHECK19:       .omp.final.done:
26616 // CHECK19-NEXT:    ret void
26617 //
26618 //
26619 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15
26620 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
26621 // CHECK19-NEXT:  entry:
26622 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26623 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26624 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26625 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26626 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26627 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26628 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26629 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26630 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26631 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26632 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26633 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26634 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26635 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26636 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26637 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26638 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26639 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26640 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26641 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26642 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26643 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26644 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
26645 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
26646 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26647 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26648 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26649 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
26650 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26651 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26652 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
26653 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26654 // CHECK19:       cond.true:
26655 // CHECK19-NEXT:    br label [[COND_END:%.*]]
26656 // CHECK19:       cond.false:
26657 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26658 // CHECK19-NEXT:    br label [[COND_END]]
26659 // CHECK19:       cond.end:
26660 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
26661 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26662 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26663 // CHECK19-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
26664 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26665 // CHECK19:       omp.inner.for.cond:
26666 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
26667 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !50
26668 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
26669 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26670 // CHECK19:       omp.inner.for.body:
26671 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
26672 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
26673 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26674 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !50
26675 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !50
26676 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
26677 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !50
26678 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26679 // CHECK19:       omp.body.continue:
26680 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26681 // CHECK19:       omp.inner.for.inc:
26682 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
26683 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
26684 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
26685 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
26686 // CHECK19:       omp.inner.for.end:
26687 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26688 // CHECK19:       omp.loop.exit:
26689 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
26690 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26691 // CHECK19-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
26692 // CHECK19-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26693 // CHECK19:       .omp.final.then:
26694 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
26695 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26696 // CHECK19:       .omp.final.done:
26697 // CHECK19-NEXT:    ret void
26698 //
26699 //
26700 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
26701 // CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
26702 // CHECK19-NEXT:  entry:
26703 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26704 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26705 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26706 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
26707 // CHECK19-NEXT:    ret void
26708 //
26709 //
26710 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..17
26711 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
26712 // CHECK19-NEXT:  entry:
26713 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26714 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26715 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26716 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26717 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26718 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26719 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26720 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26721 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26722 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26723 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26724 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26725 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26726 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26727 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26728 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
26729 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26730 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26731 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26732 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
26733 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26734 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26735 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
26736 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26737 // CHECK19:       cond.true:
26738 // CHECK19-NEXT:    br label [[COND_END:%.*]]
26739 // CHECK19:       cond.false:
26740 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26741 // CHECK19-NEXT:    br label [[COND_END]]
26742 // CHECK19:       cond.end:
26743 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
26744 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26745 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26746 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
26747 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26748 // CHECK19:       omp.inner.for.cond:
26749 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
26750 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
26751 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
26752 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26753 // CHECK19:       omp.inner.for.body:
26754 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53
26755 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
26756 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53
26757 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26758 // CHECK19:       omp.inner.for.inc:
26759 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
26760 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !53
26761 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
26762 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
26763 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
26764 // CHECK19:       omp.inner.for.end:
26765 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26766 // CHECK19:       omp.loop.exit:
26767 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
26768 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26769 // CHECK19-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
26770 // CHECK19-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26771 // CHECK19:       .omp.final.then:
26772 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
26773 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26774 // CHECK19:       .omp.final.done:
26775 // CHECK19-NEXT:    ret void
26776 //
26777 //
26778 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18
26779 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
26780 // CHECK19-NEXT:  entry:
26781 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26782 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26783 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26784 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26785 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26786 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26787 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26788 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26789 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26790 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26791 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26792 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26793 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26794 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26795 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26796 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26797 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26798 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26799 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26800 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26801 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26802 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26803 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
26804 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
26805 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26806 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26807 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26808 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
26809 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26810 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26811 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
26812 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26813 // CHECK19:       cond.true:
26814 // CHECK19-NEXT:    br label [[COND_END:%.*]]
26815 // CHECK19:       cond.false:
26816 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26817 // CHECK19-NEXT:    br label [[COND_END]]
26818 // CHECK19:       cond.end:
26819 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
26820 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26821 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26822 // CHECK19-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
26823 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26824 // CHECK19:       omp.inner.for.cond:
26825 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
26826 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !56
26827 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
26828 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26829 // CHECK19:       omp.inner.for.body:
26830 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
26831 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
26832 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26833 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !56
26834 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !56
26835 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
26836 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !56
26837 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26838 // CHECK19:       omp.body.continue:
26839 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26840 // CHECK19:       omp.inner.for.inc:
26841 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
26842 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
26843 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
26844 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
26845 // CHECK19:       omp.inner.for.end:
26846 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26847 // CHECK19:       omp.loop.exit:
26848 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
26849 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26850 // CHECK19-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
26851 // CHECK19-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26852 // CHECK19:       .omp.final.then:
26853 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
26854 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26855 // CHECK19:       .omp.final.done:
26856 // CHECK19-NEXT:    ret void
26857 //
26858 //
26859 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
26860 // CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
26861 // CHECK19-NEXT:  entry:
26862 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26863 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26864 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
26865 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26866 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26867 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26868 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26869 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
26870 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
26871 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
26872 // CHECK19-NEXT:    ret void
26873 //
26874 //
26875 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..21
26876 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
26877 // CHECK19-NEXT:  entry:
26878 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26879 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26880 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26881 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26882 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26883 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26884 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26885 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26886 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26887 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26888 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26889 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
26890 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26891 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26892 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26893 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26894 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26895 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26896 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
26897 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26898 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26899 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26900 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
26901 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26902 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26903 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
26904 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26905 // CHECK19:       cond.true:
26906 // CHECK19-NEXT:    br label [[COND_END:%.*]]
26907 // CHECK19:       cond.false:
26908 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26909 // CHECK19-NEXT:    br label [[COND_END]]
26910 // CHECK19:       cond.end:
26911 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
26912 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26913 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26914 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
26915 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26916 // CHECK19:       omp.inner.for.cond:
26917 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
26918 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
26919 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
26920 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26921 // CHECK19:       omp.inner.for.body:
26922 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !59
26923 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
26924 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59
26925 // CHECK19-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
26926 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
26927 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59
26928 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26929 // CHECK19:       omp.inner.for.inc:
26930 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
26931 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !59
26932 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
26933 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
26934 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
26935 // CHECK19:       omp.inner.for.end:
26936 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26937 // CHECK19:       omp.loop.exit:
26938 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
26939 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26940 // CHECK19-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
26941 // CHECK19-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26942 // CHECK19:       .omp.final.then:
26943 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
26944 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26945 // CHECK19:       .omp.final.done:
26946 // CHECK19-NEXT:    ret void
26947 //
26948 //
26949 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22
26950 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
26951 // CHECK19-NEXT:  entry:
26952 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26953 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26954 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26955 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26956 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
26957 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26958 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26959 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26960 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26961 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26962 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26963 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26964 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
26965 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26966 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26967 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26968 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26969 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
26970 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26971 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
26972 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26973 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26974 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26975 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26976 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
26977 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
26978 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26979 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26980 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26981 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26982 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
26983 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
26984 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
26985 // CHECK19:       omp.dispatch.cond:
26986 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26987 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26988 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
26989 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26990 // CHECK19:       cond.true:
26991 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26992 // CHECK19-NEXT:    br label [[COND_END:%.*]]
26993 // CHECK19:       cond.false:
26994 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26995 // CHECK19-NEXT:    br label [[COND_END]]
26996 // CHECK19:       cond.end:
26997 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
26998 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26999 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27000 // CHECK19-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
27001 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
27002 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27003 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
27004 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
27005 // CHECK19:       omp.dispatch.body:
27006 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27007 // CHECK19:       omp.inner.for.cond:
27008 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
27009 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !62
27010 // CHECK19-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
27011 // CHECK19-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27012 // CHECK19:       omp.inner.for.body:
27013 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
27014 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
27015 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27016 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !62
27017 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !62
27018 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
27019 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !62
27020 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27021 // CHECK19:       omp.body.continue:
27022 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27023 // CHECK19:       omp.inner.for.inc:
27024 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
27025 // CHECK19-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
27026 // CHECK19-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
27027 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
27028 // CHECK19:       omp.inner.for.end:
27029 // CHECK19-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
27030 // CHECK19:       omp.dispatch.inc:
27031 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27032 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
27033 // CHECK19-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
27034 // CHECK19-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
27035 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27036 // CHECK19-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
27037 // CHECK19-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
27038 // CHECK19-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
27039 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND]]
27040 // CHECK19:       omp.dispatch.end:
27041 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
27042 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27043 // CHECK19-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
27044 // CHECK19-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27045 // CHECK19:       .omp.final.then:
27046 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
27047 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27048 // CHECK19:       .omp.final.done:
27049 // CHECK19-NEXT:    ret void
27050 //
27051 //
27052 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
27053 // CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
27054 // CHECK19-NEXT:  entry:
27055 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
27056 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
27057 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
27058 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
27059 // CHECK19-NEXT:    ret void
27060 //
27061 //
27062 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..25
27063 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
27064 // CHECK19-NEXT:  entry:
27065 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27066 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27067 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
27068 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27069 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27070 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
27071 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
27072 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27073 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27074 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
27075 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27076 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27077 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
27078 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
27079 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
27080 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
27081 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27082 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27083 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27084 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
27085 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
27086 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27087 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
27088 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
27089 // CHECK19:       cond.true:
27090 // CHECK19-NEXT:    br label [[COND_END:%.*]]
27091 // CHECK19:       cond.false:
27092 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27093 // CHECK19-NEXT:    br label [[COND_END]]
27094 // CHECK19:       cond.end:
27095 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
27096 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
27097 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
27098 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
27099 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27100 // CHECK19:       omp.inner.for.cond:
27101 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
27102 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
27103 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
27104 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27105 // CHECK19:       omp.inner.for.body:
27106 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65
27107 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
27108 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65
27109 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27110 // CHECK19:       omp.inner.for.inc:
27111 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
27112 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !65
27113 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
27114 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
27115 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
27116 // CHECK19:       omp.inner.for.end:
27117 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
27118 // CHECK19:       omp.loop.exit:
27119 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
27120 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27121 // CHECK19-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
27122 // CHECK19-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27123 // CHECK19:       .omp.final.then:
27124 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
27125 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27126 // CHECK19:       .omp.final.done:
27127 // CHECK19-NEXT:    ret void
27128 //
27129 //
27130 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26
27131 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
27132 // CHECK19-NEXT:  entry:
27133 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27134 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27135 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
27136 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
27137 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
27138 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27139 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27140 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27141 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27142 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27143 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27144 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
27145 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27146 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27147 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27148 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27149 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
27150 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
27151 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27152 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27153 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27154 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27155 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
27156 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
27157 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27158 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27159 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27160 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27161 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27162 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
27163 // CHECK19-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
27164 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
27165 // CHECK19:       omp.dispatch.cond:
27166 // CHECK19-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
27167 // CHECK19-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
27168 // CHECK19-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
27169 // CHECK19:       omp.dispatch.body:
27170 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27171 // CHECK19-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
27172 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27173 // CHECK19:       omp.inner.for.cond:
27174 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
27175 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !68
27176 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
27177 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27178 // CHECK19:       omp.inner.for.body:
27179 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
27180 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
27181 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27182 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !68
27183 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !68
27184 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
27185 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !68
27186 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27187 // CHECK19:       omp.body.continue:
27188 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27189 // CHECK19:       omp.inner.for.inc:
27190 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
27191 // CHECK19-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
27192 // CHECK19-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
27193 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
27194 // CHECK19:       omp.inner.for.end:
27195 // CHECK19-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
27196 // CHECK19:       omp.dispatch.inc:
27197 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND]]
27198 // CHECK19:       omp.dispatch.end:
27199 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27200 // CHECK19-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
27201 // CHECK19-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27202 // CHECK19:       .omp.final.then:
27203 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
27204 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27205 // CHECK19:       .omp.final.done:
27206 // CHECK19-NEXT:    ret void
27207 //
27208 //
27209 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
27210 // CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
27211 // CHECK19-NEXT:  entry:
27212 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
27213 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
27214 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
27215 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
27216 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27217 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
27218 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27219 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
27220 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
27221 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
27222 // CHECK19-NEXT:    ret void
27223 //
27224 //
27225 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..29
27226 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
27227 // CHECK19-NEXT:  entry:
27228 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27229 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27230 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
27231 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
27232 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27233 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27234 // CHECK19-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
27235 // CHECK19-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
27236 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27237 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27238 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
27239 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
27240 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27241 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27242 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
27243 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27244 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
27245 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
27246 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
27247 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27248 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27249 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27250 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
27251 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
27252 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27253 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
27254 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
27255 // CHECK19:       cond.true:
27256 // CHECK19-NEXT:    br label [[COND_END:%.*]]
27257 // CHECK19:       cond.false:
27258 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27259 // CHECK19-NEXT:    br label [[COND_END]]
27260 // CHECK19:       cond.end:
27261 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
27262 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
27263 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
27264 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
27265 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27266 // CHECK19:       omp.inner.for.cond:
27267 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
27268 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
27269 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
27270 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27271 // CHECK19:       omp.inner.for.body:
27272 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !71
27273 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
27274 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71
27275 // CHECK19-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
27276 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
27277 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71
27278 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27279 // CHECK19:       omp.inner.for.inc:
27280 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
27281 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !71
27282 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
27283 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
27284 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
27285 // CHECK19:       omp.inner.for.end:
27286 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
27287 // CHECK19:       omp.loop.exit:
27288 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
27289 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27290 // CHECK19-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
27291 // CHECK19-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27292 // CHECK19:       .omp.final.then:
27293 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
27294 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27295 // CHECK19:       .omp.final.done:
27296 // CHECK19-NEXT:    ret void
27297 //
27298 //
27299 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30
27300 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
27301 // CHECK19-NEXT:  entry:
27302 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27303 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27304 // CHECK19-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
27305 // CHECK19-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
27306 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
27307 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
27308 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27309 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27310 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27311 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27312 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27313 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27314 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
27315 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27316 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27317 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27318 // CHECK19-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27319 // CHECK19-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
27320 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27321 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
27322 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27323 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27324 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27325 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27326 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
27327 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
27328 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27329 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27330 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27331 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27332 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27333 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27334 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
27335 // CHECK19-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
27336 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
27337 // CHECK19:       omp.dispatch.cond:
27338 // CHECK19-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
27339 // CHECK19-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
27340 // CHECK19-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
27341 // CHECK19:       omp.dispatch.body:
27342 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27343 // CHECK19-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
27344 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27345 // CHECK19:       omp.inner.for.cond:
27346 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
27347 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !74
27348 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
27349 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27350 // CHECK19:       omp.inner.for.body:
27351 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
27352 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
27353 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27354 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !74
27355 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !74
27356 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
27357 // CHECK19-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !74
27358 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27359 // CHECK19:       omp.body.continue:
27360 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27361 // CHECK19:       omp.inner.for.inc:
27362 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
27363 // CHECK19-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
27364 // CHECK19-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
27365 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
27366 // CHECK19:       omp.inner.for.end:
27367 // CHECK19-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
27368 // CHECK19:       omp.dispatch.inc:
27369 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND]]
27370 // CHECK19:       omp.dispatch.end:
27371 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27372 // CHECK19-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
27373 // CHECK19-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27374 // CHECK19:       .omp.final.then:
27375 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
27376 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27377 // CHECK19:       .omp.final.done:
27378 // CHECK19-NEXT:    ret void
27379 //
27380 //
27381 // CHECK19-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
27382 // CHECK19-SAME: () #[[ATTR5:[0-9]+]] {
27383 // CHECK19-NEXT:  entry:
27384 // CHECK19-NEXT:    call void @__tgt_register_requires(i64 1)
27385 // CHECK19-NEXT:    ret void
27386 //
27387 //
27388 // CHECK20-LABEL: define {{[^@]+}}@main
27389 // CHECK20-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
27390 // CHECK20-NEXT:  entry:
27391 // CHECK20-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
27392 // CHECK20-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
27393 // CHECK20-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 4
27394 // CHECK20-NEXT:    [[N:%.*]] = alloca i32, align 4
27395 // CHECK20-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
27396 // CHECK20-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
27397 // CHECK20-NEXT:    [[M:%.*]] = alloca i32, align 4
27398 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
27399 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
27400 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
27401 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
27402 // CHECK20-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
27403 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27404 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27405 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27406 // CHECK20-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
27407 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
27408 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
27409 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
27410 // CHECK20-NEXT:    [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
27411 // CHECK20-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
27412 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
27413 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
27414 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
27415 // CHECK20-NEXT:    [[N_CASTED18:%.*]] = alloca i32, align 4
27416 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
27417 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
27418 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
27419 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
27420 // CHECK20-NEXT:    [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
27421 // CHECK20-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
27422 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
27423 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
27424 // CHECK20-NEXT:    [[N_CASTED32:%.*]] = alloca i32, align 4
27425 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
27426 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
27427 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
27428 // CHECK20-NEXT:    [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
27429 // CHECK20-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
27430 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
27431 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
27432 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
27433 // CHECK20-NEXT:    [[N_CASTED47:%.*]] = alloca i32, align 4
27434 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
27435 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
27436 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
27437 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
27438 // CHECK20-NEXT:    [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
27439 // CHECK20-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
27440 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
27441 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
27442 // CHECK20-NEXT:    store i32 0, i32* [[RETVAL]], align 4
27443 // CHECK20-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
27444 // CHECK20-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
27445 // CHECK20-NEXT:    store i32 100, i32* [[N]], align 4
27446 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
27447 // CHECK20-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
27448 // CHECK20-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
27449 // CHECK20-NEXT:    [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
27450 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
27451 // CHECK20-NEXT:    store i32 10, i32* [[M]], align 4
27452 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N]], align 4
27453 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
27454 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
27455 // CHECK20-NEXT:    [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
27456 // CHECK20-NEXT:    [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
27457 // CHECK20-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
27458 // CHECK20-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
27459 // CHECK20-NEXT:    store i32 [[TMP3]], i32* [[TMP7]], align 4
27460 // CHECK20-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
27461 // CHECK20-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
27462 // CHECK20-NEXT:    store i32 [[TMP3]], i32* [[TMP9]], align 4
27463 // CHECK20-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
27464 // CHECK20-NEXT:    store i64 4, i64* [[TMP10]], align 4
27465 // CHECK20-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
27466 // CHECK20-NEXT:    store i8* null, i8** [[TMP11]], align 4
27467 // CHECK20-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
27468 // CHECK20-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
27469 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP13]], align 4
27470 // CHECK20-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
27471 // CHECK20-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
27472 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP15]], align 4
27473 // CHECK20-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
27474 // CHECK20-NEXT:    store i64 4, i64* [[TMP16]], align 4
27475 // CHECK20-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
27476 // CHECK20-NEXT:    store i8* null, i8** [[TMP17]], align 4
27477 // CHECK20-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
27478 // CHECK20-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
27479 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP19]], align 4
27480 // CHECK20-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
27481 // CHECK20-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
27482 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP21]], align 4
27483 // CHECK20-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
27484 // CHECK20-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 4
27485 // CHECK20-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
27486 // CHECK20-NEXT:    store i8* null, i8** [[TMP23]], align 4
27487 // CHECK20-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
27488 // CHECK20-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
27489 // CHECK20-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
27490 // CHECK20-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
27491 // CHECK20-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
27492 // CHECK20-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27493 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
27494 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27495 // CHECK20-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
27496 // CHECK20-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27497 // CHECK20-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27498 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
27499 // CHECK20-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
27500 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
27501 // CHECK20-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
27502 // CHECK20-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
27503 // CHECK20-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
27504 // CHECK20:       omp_offload.failed:
27505 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
27506 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT]]
27507 // CHECK20:       omp_offload.cont:
27508 // CHECK20-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
27509 // CHECK20-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
27510 // CHECK20-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
27511 // CHECK20-NEXT:    [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
27512 // CHECK20-NEXT:    [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
27513 // CHECK20-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
27514 // CHECK20-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
27515 // CHECK20-NEXT:    store i32 [[TMP34]], i32* [[TMP38]], align 4
27516 // CHECK20-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
27517 // CHECK20-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
27518 // CHECK20-NEXT:    store i32 [[TMP34]], i32* [[TMP40]], align 4
27519 // CHECK20-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
27520 // CHECK20-NEXT:    store i64 4, i64* [[TMP41]], align 4
27521 // CHECK20-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
27522 // CHECK20-NEXT:    store i8* null, i8** [[TMP42]], align 4
27523 // CHECK20-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
27524 // CHECK20-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
27525 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP44]], align 4
27526 // CHECK20-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
27527 // CHECK20-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
27528 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP46]], align 4
27529 // CHECK20-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
27530 // CHECK20-NEXT:    store i64 4, i64* [[TMP47]], align 4
27531 // CHECK20-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
27532 // CHECK20-NEXT:    store i8* null, i8** [[TMP48]], align 4
27533 // CHECK20-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
27534 // CHECK20-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
27535 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP50]], align 4
27536 // CHECK20-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
27537 // CHECK20-NEXT:    [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
27538 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP52]], align 4
27539 // CHECK20-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
27540 // CHECK20-NEXT:    store i64 [[TMP36]], i64* [[TMP53]], align 4
27541 // CHECK20-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
27542 // CHECK20-NEXT:    store i8* null, i8** [[TMP54]], align 4
27543 // CHECK20-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
27544 // CHECK20-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
27545 // CHECK20-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
27546 // CHECK20-NEXT:    [[TMP58:%.*]] = load i32, i32* [[N]], align 4
27547 // CHECK20-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
27548 // CHECK20-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
27549 // CHECK20-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
27550 // CHECK20-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
27551 // CHECK20-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
27552 // CHECK20-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
27553 // CHECK20-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
27554 // CHECK20-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
27555 // CHECK20-NEXT:    [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
27556 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
27557 // CHECK20-NEXT:    [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
27558 // CHECK20-NEXT:    [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
27559 // CHECK20-NEXT:    br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
27560 // CHECK20:       omp_offload.failed15:
27561 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
27562 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
27563 // CHECK20:       omp_offload.cont16:
27564 // CHECK20-NEXT:    [[TMP64:%.*]] = load i32, i32* [[M]], align 4
27565 // CHECK20-NEXT:    store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
27566 // CHECK20-NEXT:    [[TMP65:%.*]] = load i32, i32* [[N]], align 4
27567 // CHECK20-NEXT:    store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
27568 // CHECK20-NEXT:    [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
27569 // CHECK20-NEXT:    [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
27570 // CHECK20-NEXT:    store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
27571 // CHECK20-NEXT:    [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
27572 // CHECK20-NEXT:    [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
27573 // CHECK20-NEXT:    [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
27574 // CHECK20-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
27575 // CHECK20-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
27576 // CHECK20-NEXT:    store i32 [[TMP66]], i32* [[TMP72]], align 4
27577 // CHECK20-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
27578 // CHECK20-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
27579 // CHECK20-NEXT:    store i32 [[TMP66]], i32* [[TMP74]], align 4
27580 // CHECK20-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
27581 // CHECK20-NEXT:    store i64 4, i64* [[TMP75]], align 4
27582 // CHECK20-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
27583 // CHECK20-NEXT:    store i8* null, i8** [[TMP76]], align 4
27584 // CHECK20-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
27585 // CHECK20-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
27586 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP78]], align 4
27587 // CHECK20-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
27588 // CHECK20-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
27589 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP80]], align 4
27590 // CHECK20-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
27591 // CHECK20-NEXT:    store i64 4, i64* [[TMP81]], align 4
27592 // CHECK20-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
27593 // CHECK20-NEXT:    store i8* null, i8** [[TMP82]], align 4
27594 // CHECK20-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
27595 // CHECK20-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
27596 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP84]], align 4
27597 // CHECK20-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
27598 // CHECK20-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
27599 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP86]], align 4
27600 // CHECK20-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
27601 // CHECK20-NEXT:    store i64 [[TMP70]], i64* [[TMP87]], align 4
27602 // CHECK20-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
27603 // CHECK20-NEXT:    store i8* null, i8** [[TMP88]], align 4
27604 // CHECK20-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
27605 // CHECK20-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
27606 // CHECK20-NEXT:    store i32 [[TMP68]], i32* [[TMP90]], align 4
27607 // CHECK20-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
27608 // CHECK20-NEXT:    [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
27609 // CHECK20-NEXT:    store i32 [[TMP68]], i32* [[TMP92]], align 4
27610 // CHECK20-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
27611 // CHECK20-NEXT:    store i64 4, i64* [[TMP93]], align 4
27612 // CHECK20-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
27613 // CHECK20-NEXT:    store i8* null, i8** [[TMP94]], align 4
27614 // CHECK20-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
27615 // CHECK20-NEXT:    [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
27616 // CHECK20-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
27617 // CHECK20-NEXT:    [[TMP98:%.*]] = load i32, i32* [[N]], align 4
27618 // CHECK20-NEXT:    store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
27619 // CHECK20-NEXT:    [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
27620 // CHECK20-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
27621 // CHECK20-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
27622 // CHECK20-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
27623 // CHECK20-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
27624 // CHECK20-NEXT:    [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
27625 // CHECK20-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
27626 // CHECK20-NEXT:    [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
27627 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
27628 // CHECK20-NEXT:    [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
27629 // CHECK20-NEXT:    [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
27630 // CHECK20-NEXT:    br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
27631 // CHECK20:       omp_offload.failed30:
27632 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
27633 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
27634 // CHECK20:       omp_offload.cont31:
27635 // CHECK20-NEXT:    [[TMP104:%.*]] = load i32, i32* [[N]], align 4
27636 // CHECK20-NEXT:    store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
27637 // CHECK20-NEXT:    [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
27638 // CHECK20-NEXT:    [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
27639 // CHECK20-NEXT:    [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
27640 // CHECK20-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
27641 // CHECK20-NEXT:    [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
27642 // CHECK20-NEXT:    store i32 [[TMP105]], i32* [[TMP109]], align 4
27643 // CHECK20-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
27644 // CHECK20-NEXT:    [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
27645 // CHECK20-NEXT:    store i32 [[TMP105]], i32* [[TMP111]], align 4
27646 // CHECK20-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
27647 // CHECK20-NEXT:    store i64 4, i64* [[TMP112]], align 4
27648 // CHECK20-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
27649 // CHECK20-NEXT:    store i8* null, i8** [[TMP113]], align 4
27650 // CHECK20-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
27651 // CHECK20-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
27652 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP115]], align 4
27653 // CHECK20-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
27654 // CHECK20-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
27655 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP117]], align 4
27656 // CHECK20-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
27657 // CHECK20-NEXT:    store i64 4, i64* [[TMP118]], align 4
27658 // CHECK20-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
27659 // CHECK20-NEXT:    store i8* null, i8** [[TMP119]], align 4
27660 // CHECK20-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
27661 // CHECK20-NEXT:    [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
27662 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP121]], align 4
27663 // CHECK20-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
27664 // CHECK20-NEXT:    [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
27665 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP123]], align 4
27666 // CHECK20-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
27667 // CHECK20-NEXT:    store i64 [[TMP107]], i64* [[TMP124]], align 4
27668 // CHECK20-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
27669 // CHECK20-NEXT:    store i8* null, i8** [[TMP125]], align 4
27670 // CHECK20-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
27671 // CHECK20-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
27672 // CHECK20-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
27673 // CHECK20-NEXT:    [[TMP129:%.*]] = load i32, i32* [[N]], align 4
27674 // CHECK20-NEXT:    store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
27675 // CHECK20-NEXT:    [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
27676 // CHECK20-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
27677 // CHECK20-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
27678 // CHECK20-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
27679 // CHECK20-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
27680 // CHECK20-NEXT:    [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
27681 // CHECK20-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
27682 // CHECK20-NEXT:    [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
27683 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
27684 // CHECK20-NEXT:    [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
27685 // CHECK20-NEXT:    [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
27686 // CHECK20-NEXT:    br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
27687 // CHECK20:       omp_offload.failed44:
27688 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
27689 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
27690 // CHECK20:       omp_offload.cont45:
27691 // CHECK20-NEXT:    [[TMP135:%.*]] = load i32, i32* [[M]], align 4
27692 // CHECK20-NEXT:    store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
27693 // CHECK20-NEXT:    [[TMP136:%.*]] = load i32, i32* [[N]], align 4
27694 // CHECK20-NEXT:    store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
27695 // CHECK20-NEXT:    [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
27696 // CHECK20-NEXT:    [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
27697 // CHECK20-NEXT:    store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
27698 // CHECK20-NEXT:    [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
27699 // CHECK20-NEXT:    [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
27700 // CHECK20-NEXT:    [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
27701 // CHECK20-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
27702 // CHECK20-NEXT:    [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
27703 // CHECK20-NEXT:    store i32 [[TMP137]], i32* [[TMP143]], align 4
27704 // CHECK20-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
27705 // CHECK20-NEXT:    [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
27706 // CHECK20-NEXT:    store i32 [[TMP137]], i32* [[TMP145]], align 4
27707 // CHECK20-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
27708 // CHECK20-NEXT:    store i64 4, i64* [[TMP146]], align 4
27709 // CHECK20-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
27710 // CHECK20-NEXT:    store i8* null, i8** [[TMP147]], align 4
27711 // CHECK20-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
27712 // CHECK20-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
27713 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP149]], align 4
27714 // CHECK20-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
27715 // CHECK20-NEXT:    [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
27716 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[TMP151]], align 4
27717 // CHECK20-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
27718 // CHECK20-NEXT:    store i64 4, i64* [[TMP152]], align 4
27719 // CHECK20-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
27720 // CHECK20-NEXT:    store i8* null, i8** [[TMP153]], align 4
27721 // CHECK20-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
27722 // CHECK20-NEXT:    [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
27723 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP155]], align 4
27724 // CHECK20-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
27725 // CHECK20-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
27726 // CHECK20-NEXT:    store i32* [[VLA]], i32** [[TMP157]], align 4
27727 // CHECK20-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
27728 // CHECK20-NEXT:    store i64 [[TMP141]], i64* [[TMP158]], align 4
27729 // CHECK20-NEXT:    [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
27730 // CHECK20-NEXT:    store i8* null, i8** [[TMP159]], align 4
27731 // CHECK20-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
27732 // CHECK20-NEXT:    [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
27733 // CHECK20-NEXT:    store i32 [[TMP139]], i32* [[TMP161]], align 4
27734 // CHECK20-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
27735 // CHECK20-NEXT:    [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
27736 // CHECK20-NEXT:    store i32 [[TMP139]], i32* [[TMP163]], align 4
27737 // CHECK20-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
27738 // CHECK20-NEXT:    store i64 4, i64* [[TMP164]], align 4
27739 // CHECK20-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
27740 // CHECK20-NEXT:    store i8* null, i8** [[TMP165]], align 4
27741 // CHECK20-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
27742 // CHECK20-NEXT:    [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
27743 // CHECK20-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
27744 // CHECK20-NEXT:    [[TMP169:%.*]] = load i32, i32* [[N]], align 4
27745 // CHECK20-NEXT:    store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
27746 // CHECK20-NEXT:    [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
27747 // CHECK20-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
27748 // CHECK20-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
27749 // CHECK20-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
27750 // CHECK20-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
27751 // CHECK20-NEXT:    [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
27752 // CHECK20-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
27753 // CHECK20-NEXT:    [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
27754 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
27755 // CHECK20-NEXT:    [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
27756 // CHECK20-NEXT:    [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
27757 // CHECK20-NEXT:    br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
27758 // CHECK20:       omp_offload.failed60:
27759 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
27760 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
27761 // CHECK20:       omp_offload.cont61:
27762 // CHECK20-NEXT:    [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
27763 // CHECK20-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
27764 // CHECK20-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
27765 // CHECK20-NEXT:    [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27766 // CHECK20-NEXT:    call void @llvm.stackrestore(i8* [[TMP176]])
27767 // CHECK20-NEXT:    [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
27768 // CHECK20-NEXT:    ret i32 [[TMP177]]
27769 //
27770 //
27771 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
27772 // CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
27773 // CHECK20-NEXT:  entry:
27774 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27775 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
27776 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
27777 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
27778 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27779 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
27780 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
27781 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
27782 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
27783 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27784 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
27785 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
27786 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
27787 // CHECK20-NEXT:    ret void
27788 //
27789 //
27790 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined.
27791 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
27792 // CHECK20-NEXT:  entry:
27793 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27794 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27795 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27796 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
27797 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
27798 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27799 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27800 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27801 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27802 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
27803 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
27804 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
27805 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27806 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27807 // CHECK20-NEXT:    [[I3:%.*]] = alloca i32, align 4
27808 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
27809 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27810 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27811 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27812 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
27813 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
27814 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
27815 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
27816 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27817 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
27818 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27819 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
27820 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27821 // CHECK20-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
27822 // CHECK20-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27823 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
27824 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27825 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
27826 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
27827 // CHECK20:       omp.precond.then:
27828 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
27829 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27830 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
27831 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27832 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27833 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27834 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
27835 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
27836 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27837 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27838 // CHECK20-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
27839 // CHECK20-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
27840 // CHECK20:       cond.true:
27841 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27842 // CHECK20-NEXT:    br label [[COND_END:%.*]]
27843 // CHECK20:       cond.false:
27844 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27845 // CHECK20-NEXT:    br label [[COND_END]]
27846 // CHECK20:       cond.end:
27847 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
27848 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
27849 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
27850 // CHECK20-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
27851 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27852 // CHECK20:       omp.inner.for.cond:
27853 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
27854 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
27855 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
27856 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27857 // CHECK20:       omp.inner.for.body:
27858 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14
27859 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14
27860 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !14
27861 // CHECK20-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !14
27862 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !14
27863 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !14
27864 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27865 // CHECK20:       omp.inner.for.inc:
27866 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
27867 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14
27868 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
27869 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
27870 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
27871 // CHECK20:       omp.inner.for.end:
27872 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
27873 // CHECK20:       omp.loop.exit:
27874 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27875 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
27876 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
27877 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27878 // CHECK20-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
27879 // CHECK20-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27880 // CHECK20:       .omp.final.then:
27881 // CHECK20-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27882 // CHECK20-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
27883 // CHECK20-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
27884 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
27885 // CHECK20-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
27886 // CHECK20-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
27887 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27888 // CHECK20:       .omp.final.done:
27889 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
27890 // CHECK20:       omp.precond.end:
27891 // CHECK20-NEXT:    ret void
27892 //
27893 //
27894 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..1
27895 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
27896 // CHECK20-NEXT:  entry:
27897 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27898 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27899 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
27900 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
27901 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27902 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
27903 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
27904 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27905 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27906 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27907 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27908 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
27909 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27910 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27911 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27912 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27913 // CHECK20-NEXT:    [[I3:%.*]] = alloca i32, align 4
27914 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27915 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27916 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27917 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27918 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27919 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
27920 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
27921 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
27922 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
27923 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27924 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
27925 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27926 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
27927 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27928 // CHECK20-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
27929 // CHECK20-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27930 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
27931 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27932 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
27933 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
27934 // CHECK20:       omp.precond.then:
27935 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27936 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27937 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
27938 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27939 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27940 // CHECK20-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
27941 // CHECK20-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
27942 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27943 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27944 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27945 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
27946 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
27947 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27948 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27949 // CHECK20-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
27950 // CHECK20-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
27951 // CHECK20:       cond.true:
27952 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27953 // CHECK20-NEXT:    br label [[COND_END:%.*]]
27954 // CHECK20:       cond.false:
27955 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27956 // CHECK20-NEXT:    br label [[COND_END]]
27957 // CHECK20:       cond.end:
27958 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
27959 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
27960 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27961 // CHECK20-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
27962 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27963 // CHECK20:       omp.inner.for.cond:
27964 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
27965 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
27966 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
27967 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27968 // CHECK20:       omp.inner.for.body:
27969 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
27970 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
27971 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27972 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !18
27973 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !18
27974 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
27975 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
27976 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27977 // CHECK20:       omp.body.continue:
27978 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27979 // CHECK20:       omp.inner.for.inc:
27980 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
27981 // CHECK20-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
27982 // CHECK20-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
27983 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
27984 // CHECK20:       omp.inner.for.end:
27985 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
27986 // CHECK20:       omp.loop.exit:
27987 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27988 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
27989 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
27990 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27991 // CHECK20-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
27992 // CHECK20-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27993 // CHECK20:       .omp.final.then:
27994 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27995 // CHECK20-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
27996 // CHECK20-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
27997 // CHECK20-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
27998 // CHECK20-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
27999 // CHECK20-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
28000 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28001 // CHECK20:       .omp.final.done:
28002 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28003 // CHECK20:       omp.precond.end:
28004 // CHECK20-NEXT:    ret void
28005 //
28006 //
28007 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
28008 // CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
28009 // CHECK20-NEXT:  entry:
28010 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28011 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28012 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28013 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28014 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28015 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28016 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28017 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28018 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28019 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28020 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
28021 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
28022 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
28023 // CHECK20-NEXT:    ret void
28024 //
28025 //
28026 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..2
28027 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
28028 // CHECK20-NEXT:  entry:
28029 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28030 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28031 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28032 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28033 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28034 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28035 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28036 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28037 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28038 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28039 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
28040 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
28041 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28042 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28043 // CHECK20-NEXT:    [[I3:%.*]] = alloca i32, align 4
28044 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28045 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28046 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28047 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28048 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28049 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28050 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28051 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28052 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28053 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
28054 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28055 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28056 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28057 // CHECK20-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28058 // CHECK20-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28059 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28060 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28061 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28062 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28063 // CHECK20:       omp.precond.then:
28064 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
28065 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28066 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
28067 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28068 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28069 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28070 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
28071 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
28072 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28073 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28074 // CHECK20-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
28075 // CHECK20-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
28076 // CHECK20:       cond.true:
28077 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28078 // CHECK20-NEXT:    br label [[COND_END:%.*]]
28079 // CHECK20:       cond.false:
28080 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28081 // CHECK20-NEXT:    br label [[COND_END]]
28082 // CHECK20:       cond.end:
28083 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
28084 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
28085 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
28086 // CHECK20-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
28087 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28088 // CHECK20:       omp.inner.for.cond:
28089 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
28090 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
28091 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
28092 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28093 // CHECK20:       omp.inner.for.body:
28094 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !23
28095 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !23
28096 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !23
28097 // CHECK20-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !23
28098 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !23
28099 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !23
28100 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28101 // CHECK20:       omp.inner.for.inc:
28102 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
28103 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !23
28104 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
28105 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
28106 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
28107 // CHECK20:       omp.inner.for.end:
28108 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
28109 // CHECK20:       omp.loop.exit:
28110 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28111 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
28112 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
28113 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28114 // CHECK20-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
28115 // CHECK20-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28116 // CHECK20:       .omp.final.then:
28117 // CHECK20-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28118 // CHECK20-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
28119 // CHECK20-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
28120 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
28121 // CHECK20-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
28122 // CHECK20-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
28123 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28124 // CHECK20:       .omp.final.done:
28125 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28126 // CHECK20:       omp.precond.end:
28127 // CHECK20-NEXT:    ret void
28128 //
28129 //
28130 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..3
28131 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
28132 // CHECK20-NEXT:  entry:
28133 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28134 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28135 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
28136 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
28137 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28138 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28139 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28140 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28141 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28142 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28143 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28144 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28145 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28146 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28147 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28148 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28149 // CHECK20-NEXT:    [[I3:%.*]] = alloca i32, align 4
28150 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28151 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28152 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28153 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28154 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28155 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28156 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28157 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28158 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28159 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28160 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
28161 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28162 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28163 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28164 // CHECK20-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28165 // CHECK20-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28166 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28167 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28168 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28169 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28170 // CHECK20:       omp.precond.then:
28171 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28172 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28173 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
28174 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28175 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28176 // CHECK20-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
28177 // CHECK20-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
28178 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28179 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28180 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28181 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
28182 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
28183 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28184 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28185 // CHECK20-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
28186 // CHECK20-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
28187 // CHECK20:       cond.true:
28188 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28189 // CHECK20-NEXT:    br label [[COND_END:%.*]]
28190 // CHECK20:       cond.false:
28191 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28192 // CHECK20-NEXT:    br label [[COND_END]]
28193 // CHECK20:       cond.end:
28194 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
28195 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
28196 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28197 // CHECK20-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
28198 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28199 // CHECK20:       omp.inner.for.cond:
28200 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
28201 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
28202 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
28203 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28204 // CHECK20:       omp.inner.for.body:
28205 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
28206 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
28207 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28208 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !26
28209 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !26
28210 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
28211 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !26
28212 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28213 // CHECK20:       omp.body.continue:
28214 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28215 // CHECK20:       omp.inner.for.inc:
28216 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
28217 // CHECK20-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
28218 // CHECK20-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
28219 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
28220 // CHECK20:       omp.inner.for.end:
28221 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
28222 // CHECK20:       omp.loop.exit:
28223 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28224 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
28225 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
28226 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28227 // CHECK20-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
28228 // CHECK20-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28229 // CHECK20:       .omp.final.then:
28230 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28231 // CHECK20-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
28232 // CHECK20-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
28233 // CHECK20-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
28234 // CHECK20-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
28235 // CHECK20-NEXT:    store i32 [[ADD10]], i32* [[I3]], align 4
28236 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28237 // CHECK20:       .omp.final.done:
28238 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28239 // CHECK20:       omp.precond.end:
28240 // CHECK20-NEXT:    ret void
28241 //
28242 //
28243 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
28244 // CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
28245 // CHECK20-NEXT:  entry:
28246 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28247 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28248 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28249 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
28250 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28251 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
28252 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28253 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28254 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28255 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28256 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28257 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28258 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28259 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
28260 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
28261 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28262 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
28263 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
28264 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
28265 // CHECK20-NEXT:    ret void
28266 //
28267 //
28268 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5
28269 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
28270 // CHECK20-NEXT:  entry:
28271 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28272 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28273 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28274 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28275 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28276 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
28277 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28278 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28279 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28280 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28281 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28282 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
28283 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
28284 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28285 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28286 // CHECK20-NEXT:    [[I4:%.*]] = alloca i32, align 4
28287 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28288 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
28289 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28290 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28291 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28292 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28293 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28294 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28295 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28296 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28297 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28298 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28299 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28300 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28301 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28302 // CHECK20-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
28303 // CHECK20-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
28304 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28305 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28306 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28307 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28308 // CHECK20:       omp.precond.then:
28309 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
28310 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28311 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
28312 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28313 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28314 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28315 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28316 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
28317 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
28318 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28319 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28320 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
28321 // CHECK20-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
28322 // CHECK20:       cond.true:
28323 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28324 // CHECK20-NEXT:    br label [[COND_END:%.*]]
28325 // CHECK20:       cond.false:
28326 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28327 // CHECK20-NEXT:    br label [[COND_END]]
28328 // CHECK20:       cond.end:
28329 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
28330 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
28331 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
28332 // CHECK20-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
28333 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28334 // CHECK20:       omp.inner.for.cond:
28335 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
28336 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
28337 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
28338 // CHECK20-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
28339 // CHECK20-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28340 // CHECK20:       omp.inner.for.body:
28341 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
28342 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
28343 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !29
28344 // CHECK20-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4, !llvm.access.group !29
28345 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !29
28346 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29
28347 // CHECK20-NEXT:    store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
28348 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29
28349 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29
28350 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28351 // CHECK20:       omp.inner.for.inc:
28352 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
28353 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
28354 // CHECK20-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
28355 // CHECK20-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
28356 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
28357 // CHECK20-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
28358 // CHECK20-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
28359 // CHECK20-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
28360 // CHECK20-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
28361 // CHECK20-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !29
28362 // CHECK20-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
28363 // CHECK20-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
28364 // CHECK20-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
28365 // CHECK20-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
28366 // CHECK20-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
28367 // CHECK20-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
28368 // CHECK20:       cond.true11:
28369 // CHECK20-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !29
28370 // CHECK20-NEXT:    br label [[COND_END13:%.*]]
28371 // CHECK20:       cond.false12:
28372 // CHECK20-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
28373 // CHECK20-NEXT:    br label [[COND_END13]]
28374 // CHECK20:       cond.end13:
28375 // CHECK20-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
28376 // CHECK20-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !29
28377 // CHECK20-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !29
28378 // CHECK20-NEXT:    store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
28379 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
28380 // CHECK20:       omp.inner.for.end:
28381 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
28382 // CHECK20:       omp.loop.exit:
28383 // CHECK20-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28384 // CHECK20-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
28385 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
28386 // CHECK20-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28387 // CHECK20-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
28388 // CHECK20-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28389 // CHECK20:       .omp.final.then:
28390 // CHECK20-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28391 // CHECK20-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP37]], 0
28392 // CHECK20-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
28393 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
28394 // CHECK20-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
28395 // CHECK20-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
28396 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28397 // CHECK20:       .omp.final.done:
28398 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28399 // CHECK20:       omp.precond.end:
28400 // CHECK20-NEXT:    ret void
28401 //
28402 //
28403 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6
28404 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
28405 // CHECK20-NEXT:  entry:
28406 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28407 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28408 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
28409 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
28410 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28411 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28412 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28413 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
28414 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28415 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28416 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28417 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28418 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28419 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28420 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28421 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28422 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28423 // CHECK20-NEXT:    [[I4:%.*]] = alloca i32, align 4
28424 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28425 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28426 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28427 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28428 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28429 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28430 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28431 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28432 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28433 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28434 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28435 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28436 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28437 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28438 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28439 // CHECK20-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
28440 // CHECK20-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
28441 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28442 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28443 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28444 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28445 // CHECK20:       omp.precond.then:
28446 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28447 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28448 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
28449 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28450 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28451 // CHECK20-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
28452 // CHECK20-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
28453 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28454 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28455 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28456 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
28457 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
28458 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28459 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28460 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
28461 // CHECK20-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
28462 // CHECK20:       cond.true:
28463 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28464 // CHECK20-NEXT:    br label [[COND_END:%.*]]
28465 // CHECK20:       cond.false:
28466 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28467 // CHECK20-NEXT:    br label [[COND_END]]
28468 // CHECK20:       cond.end:
28469 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
28470 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
28471 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28472 // CHECK20-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
28473 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28474 // CHECK20:       omp.inner.for.cond:
28475 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
28476 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
28477 // CHECK20-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
28478 // CHECK20-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28479 // CHECK20:       omp.inner.for.body:
28480 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
28481 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
28482 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28483 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !32
28484 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !32
28485 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
28486 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
28487 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28488 // CHECK20:       omp.body.continue:
28489 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28490 // CHECK20:       omp.inner.for.inc:
28491 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
28492 // CHECK20-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
28493 // CHECK20-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
28494 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
28495 // CHECK20:       omp.inner.for.end:
28496 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
28497 // CHECK20:       omp.loop.exit:
28498 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28499 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
28500 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
28501 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28502 // CHECK20-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
28503 // CHECK20-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28504 // CHECK20:       .omp.final.then:
28505 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28506 // CHECK20-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP24]], 0
28507 // CHECK20-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
28508 // CHECK20-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
28509 // CHECK20-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
28510 // CHECK20-NEXT:    store i32 [[ADD11]], i32* [[I4]], align 4
28511 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28512 // CHECK20:       .omp.final.done:
28513 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28514 // CHECK20:       omp.precond.end:
28515 // CHECK20-NEXT:    ret void
28516 //
28517 //
28518 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
28519 // CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
28520 // CHECK20-NEXT:  entry:
28521 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28522 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28523 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28524 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28525 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28526 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28527 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28528 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28529 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28530 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28531 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
28532 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
28533 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
28534 // CHECK20-NEXT:    ret void
28535 //
28536 //
28537 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..8
28538 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
28539 // CHECK20-NEXT:  entry:
28540 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28541 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28542 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28543 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28544 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28545 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28546 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28547 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28548 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28549 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28550 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
28551 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
28552 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28553 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28554 // CHECK20-NEXT:    [[I3:%.*]] = alloca i32, align 4
28555 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28556 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28557 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28558 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28559 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28560 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28561 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28562 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28563 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28564 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
28565 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28566 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28567 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28568 // CHECK20-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28569 // CHECK20-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28570 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28571 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28572 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28573 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28574 // CHECK20:       omp.precond.then:
28575 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
28576 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28577 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
28578 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28579 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28580 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28581 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
28582 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
28583 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28584 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28585 // CHECK20-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
28586 // CHECK20-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
28587 // CHECK20:       cond.true:
28588 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28589 // CHECK20-NEXT:    br label [[COND_END:%.*]]
28590 // CHECK20:       cond.false:
28591 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28592 // CHECK20-NEXT:    br label [[COND_END]]
28593 // CHECK20:       cond.end:
28594 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
28595 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
28596 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
28597 // CHECK20-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
28598 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28599 // CHECK20:       omp.inner.for.cond:
28600 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
28601 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
28602 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
28603 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28604 // CHECK20:       omp.inner.for.body:
28605 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35
28606 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35
28607 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35
28608 // CHECK20-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35
28609 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35
28610 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35
28611 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28612 // CHECK20:       omp.inner.for.inc:
28613 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
28614 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35
28615 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
28616 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
28617 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
28618 // CHECK20:       omp.inner.for.end:
28619 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
28620 // CHECK20:       omp.loop.exit:
28621 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28622 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
28623 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
28624 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28625 // CHECK20-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
28626 // CHECK20-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28627 // CHECK20:       .omp.final.then:
28628 // CHECK20-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28629 // CHECK20-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
28630 // CHECK20-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
28631 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
28632 // CHECK20-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
28633 // CHECK20-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
28634 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28635 // CHECK20:       .omp.final.done:
28636 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28637 // CHECK20:       omp.precond.end:
28638 // CHECK20-NEXT:    ret void
28639 //
28640 //
28641 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9
28642 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
28643 // CHECK20-NEXT:  entry:
28644 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28645 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28646 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
28647 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
28648 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28649 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28650 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28651 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28652 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28653 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28654 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28655 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28656 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28657 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28658 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28659 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28660 // CHECK20-NEXT:    [[I3:%.*]] = alloca i32, align 4
28661 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28662 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28663 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28664 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28665 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28666 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28667 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28668 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28669 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28670 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28671 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
28672 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28673 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28674 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28675 // CHECK20-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28676 // CHECK20-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28677 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28678 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28679 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28680 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28681 // CHECK20:       omp.precond.then:
28682 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28683 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28684 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
28685 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28686 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28687 // CHECK20-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
28688 // CHECK20-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
28689 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28690 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28691 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28692 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28693 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28694 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
28695 // CHECK20-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
28696 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
28697 // CHECK20:       omp.dispatch.cond:
28698 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28699 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
28700 // CHECK20-NEXT:    [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
28701 // CHECK20-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
28702 // CHECK20-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
28703 // CHECK20:       omp.dispatch.body:
28704 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28705 // CHECK20-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
28706 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28707 // CHECK20:       omp.inner.for.cond:
28708 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
28709 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
28710 // CHECK20-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
28711 // CHECK20-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28712 // CHECK20:       omp.inner.for.body:
28713 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
28714 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
28715 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28716 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !38
28717 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !38
28718 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
28719 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !38
28720 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28721 // CHECK20:       omp.body.continue:
28722 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28723 // CHECK20:       omp.inner.for.inc:
28724 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
28725 // CHECK20-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
28726 // CHECK20-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
28727 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
28728 // CHECK20:       omp.inner.for.end:
28729 // CHECK20-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
28730 // CHECK20:       omp.dispatch.inc:
28731 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND]]
28732 // CHECK20:       omp.dispatch.end:
28733 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28734 // CHECK20-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
28735 // CHECK20-NEXT:    br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28736 // CHECK20:       .omp.final.then:
28737 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28738 // CHECK20-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP23]], 0
28739 // CHECK20-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
28740 // CHECK20-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
28741 // CHECK20-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
28742 // CHECK20-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
28743 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28744 // CHECK20:       .omp.final.done:
28745 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28746 // CHECK20:       omp.precond.end:
28747 // CHECK20-NEXT:    ret void
28748 //
28749 //
28750 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
28751 // CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
28752 // CHECK20-NEXT:  entry:
28753 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28754 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28755 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28756 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
28757 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28758 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
28759 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28760 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28761 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28762 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28763 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28764 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28765 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28766 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
28767 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
28768 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28769 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
28770 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
28771 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
28772 // CHECK20-NEXT:    ret void
28773 //
28774 //
28775 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11
28776 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
28777 // CHECK20-NEXT:  entry:
28778 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28779 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28780 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28781 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28782 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28783 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
28784 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28785 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28786 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28787 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28788 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28789 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
28790 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
28791 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28792 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28793 // CHECK20-NEXT:    [[I4:%.*]] = alloca i32, align 4
28794 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
28795 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
28796 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28797 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28798 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28799 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28800 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28801 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28802 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28803 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28804 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28805 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28806 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28807 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28808 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28809 // CHECK20-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
28810 // CHECK20-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
28811 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28812 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28813 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28814 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28815 // CHECK20:       omp.precond.then:
28816 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
28817 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28818 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
28819 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28820 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28821 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28822 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
28823 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
28824 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28825 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28826 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
28827 // CHECK20-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
28828 // CHECK20:       cond.true:
28829 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28830 // CHECK20-NEXT:    br label [[COND_END:%.*]]
28831 // CHECK20:       cond.false:
28832 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
28833 // CHECK20-NEXT:    br label [[COND_END]]
28834 // CHECK20:       cond.end:
28835 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
28836 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
28837 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
28838 // CHECK20-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
28839 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28840 // CHECK20:       omp.inner.for.cond:
28841 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
28842 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
28843 // CHECK20-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
28844 // CHECK20-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28845 // CHECK20:       omp.inner.for.body:
28846 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !41
28847 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !41
28848 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !41
28849 // CHECK20-NEXT:    store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !41
28850 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !41
28851 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41
28852 // CHECK20-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
28853 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41
28854 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41
28855 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28856 // CHECK20:       omp.inner.for.inc:
28857 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
28858 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !41
28859 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
28860 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
28861 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
28862 // CHECK20:       omp.inner.for.end:
28863 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
28864 // CHECK20:       omp.loop.exit:
28865 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28866 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
28867 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
28868 // CHECK20-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28869 // CHECK20-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
28870 // CHECK20-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28871 // CHECK20:       .omp.final.then:
28872 // CHECK20-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28873 // CHECK20-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
28874 // CHECK20-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
28875 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
28876 // CHECK20-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
28877 // CHECK20-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
28878 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28879 // CHECK20:       .omp.final.done:
28880 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28881 // CHECK20:       omp.precond.end:
28882 // CHECK20-NEXT:    ret void
28883 //
28884 //
28885 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12
28886 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
28887 // CHECK20-NEXT:  entry:
28888 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
28889 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
28890 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
28891 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
28892 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28893 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
28894 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
28895 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
28896 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28897 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28898 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28899 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28900 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
28901 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28902 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28903 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
28904 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
28905 // CHECK20-NEXT:    [[I4:%.*]] = alloca i32, align 4
28906 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
28907 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
28908 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28909 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28910 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28911 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
28912 // CHECK20-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
28913 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28914 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
28915 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
28916 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28917 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28918 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28919 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
28920 // CHECK20-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28921 // CHECK20-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
28922 // CHECK20-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
28923 // CHECK20-NEXT:    store i32 0, i32* [[I]], align 4
28924 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28925 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
28926 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
28927 // CHECK20:       omp.precond.then:
28928 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28929 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28930 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
28931 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
28932 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
28933 // CHECK20-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
28934 // CHECK20-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
28935 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
28936 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
28937 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
28938 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28939 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28940 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28941 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
28942 // CHECK20-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
28943 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
28944 // CHECK20:       omp.dispatch.cond:
28945 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
28946 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
28947 // CHECK20-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
28948 // CHECK20-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
28949 // CHECK20-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
28950 // CHECK20:       omp.dispatch.body:
28951 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28952 // CHECK20-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
28953 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28954 // CHECK20:       omp.inner.for.cond:
28955 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
28956 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
28957 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
28958 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28959 // CHECK20:       omp.inner.for.body:
28960 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
28961 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
28962 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28963 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !44
28964 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !44
28965 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
28966 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
28967 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28968 // CHECK20:       omp.body.continue:
28969 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28970 // CHECK20:       omp.inner.for.inc:
28971 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
28972 // CHECK20-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
28973 // CHECK20-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
28974 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
28975 // CHECK20:       omp.inner.for.end:
28976 // CHECK20-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
28977 // CHECK20:       omp.dispatch.inc:
28978 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND]]
28979 // CHECK20:       omp.dispatch.end:
28980 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
28981 // CHECK20-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
28982 // CHECK20-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
28983 // CHECK20:       .omp.final.then:
28984 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28985 // CHECK20-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
28986 // CHECK20-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
28987 // CHECK20-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
28988 // CHECK20-NEXT:    [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
28989 // CHECK20-NEXT:    store i32 [[ADD10]], i32* [[I4]], align 4
28990 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
28991 // CHECK20:       .omp.final.done:
28992 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
28993 // CHECK20:       omp.precond.end:
28994 // CHECK20-NEXT:    ret void
28995 //
28996 //
28997 // CHECK20-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
28998 // CHECK20-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
28999 // CHECK20-NEXT:  entry:
29000 // CHECK20-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
29001 // CHECK20-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
29002 // CHECK20-NEXT:    [[M:%.*]] = alloca i32, align 4
29003 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
29004 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
29005 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
29006 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29007 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
29008 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
29009 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
29010 // CHECK20-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
29011 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29012 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
29013 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
29014 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
29015 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
29016 // CHECK20-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
29017 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
29018 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
29019 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
29020 // CHECK20-NEXT:    [[_TMP16:%.*]] = alloca i32, align 4
29021 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
29022 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
29023 // CHECK20-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
29024 // CHECK20-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
29025 // CHECK20-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
29026 // CHECK20-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
29027 // CHECK20-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
29028 // CHECK20-NEXT:    store i32 10, i32* [[M]], align 4
29029 // CHECK20-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
29030 // CHECK20-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
29031 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
29032 // CHECK20-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
29033 // CHECK20-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
29034 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
29035 // CHECK20-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
29036 // CHECK20-NEXT:    store i8* null, i8** [[TMP4]], align 4
29037 // CHECK20-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
29038 // CHECK20-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
29039 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
29040 // CHECK20-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
29041 // CHECK20-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
29042 // CHECK20-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
29043 // CHECK20:       omp_offload.failed:
29044 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
29045 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT]]
29046 // CHECK20:       omp_offload.cont:
29047 // CHECK20-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
29048 // CHECK20-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
29049 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
29050 // CHECK20-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
29051 // CHECK20-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
29052 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
29053 // CHECK20-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
29054 // CHECK20-NEXT:    store i8* null, i8** [[TMP13]], align 4
29055 // CHECK20-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
29056 // CHECK20-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
29057 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
29058 // CHECK20-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
29059 // CHECK20-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
29060 // CHECK20-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
29061 // CHECK20:       omp_offload.failed5:
29062 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
29063 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
29064 // CHECK20:       omp_offload.cont6:
29065 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[M]], align 4
29066 // CHECK20-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
29067 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29068 // CHECK20-NEXT:    store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
29069 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
29070 // CHECK20-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
29071 // CHECK20-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
29072 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
29073 // CHECK20-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
29074 // CHECK20-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
29075 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
29076 // CHECK20-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
29077 // CHECK20-NEXT:    store i8* null, i8** [[TMP25]], align 4
29078 // CHECK20-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
29079 // CHECK20-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
29080 // CHECK20-NEXT:    store i32 [[TMP20]], i32* [[TMP27]], align 4
29081 // CHECK20-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
29082 // CHECK20-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
29083 // CHECK20-NEXT:    store i32 [[TMP20]], i32* [[TMP29]], align 4
29084 // CHECK20-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
29085 // CHECK20-NEXT:    store i8* null, i8** [[TMP30]], align 4
29086 // CHECK20-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
29087 // CHECK20-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
29088 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
29089 // CHECK20-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
29090 // CHECK20-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
29091 // CHECK20-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
29092 // CHECK20:       omp_offload.failed11:
29093 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
29094 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
29095 // CHECK20:       omp_offload.cont12:
29096 // CHECK20-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
29097 // CHECK20-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
29098 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
29099 // CHECK20-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
29100 // CHECK20-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
29101 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
29102 // CHECK20-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
29103 // CHECK20-NEXT:    store i8* null, i8** [[TMP39]], align 4
29104 // CHECK20-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
29105 // CHECK20-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
29106 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
29107 // CHECK20-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
29108 // CHECK20-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
29109 // CHECK20-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
29110 // CHECK20:       omp_offload.failed17:
29111 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
29112 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
29113 // CHECK20:       omp_offload.cont18:
29114 // CHECK20-NEXT:    [[TMP44:%.*]] = load i32, i32* [[M]], align 4
29115 // CHECK20-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
29116 // CHECK20-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
29117 // CHECK20-NEXT:    store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
29118 // CHECK20-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
29119 // CHECK20-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
29120 // CHECK20-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
29121 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
29122 // CHECK20-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
29123 // CHECK20-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
29124 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
29125 // CHECK20-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
29126 // CHECK20-NEXT:    store i8* null, i8** [[TMP51]], align 4
29127 // CHECK20-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
29128 // CHECK20-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
29129 // CHECK20-NEXT:    store i32 [[TMP46]], i32* [[TMP53]], align 4
29130 // CHECK20-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
29131 // CHECK20-NEXT:    [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
29132 // CHECK20-NEXT:    store i32 [[TMP46]], i32* [[TMP55]], align 4
29133 // CHECK20-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
29134 // CHECK20-NEXT:    store i8* null, i8** [[TMP56]], align 4
29135 // CHECK20-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
29136 // CHECK20-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
29137 // CHECK20-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
29138 // CHECK20-NEXT:    [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
29139 // CHECK20-NEXT:    [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
29140 // CHECK20-NEXT:    br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
29141 // CHECK20:       omp_offload.failed25:
29142 // CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
29143 // CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
29144 // CHECK20:       omp_offload.cont26:
29145 // CHECK20-NEXT:    ret i32 0
29146 //
29147 //
29148 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
29149 // CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29150 // CHECK20-NEXT:  entry:
29151 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29152 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29153 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29154 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
29155 // CHECK20-NEXT:    ret void
29156 //
29157 //
29158 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14
29159 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29160 // CHECK20-NEXT:  entry:
29161 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29162 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29163 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29164 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29165 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29166 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
29167 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
29168 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29169 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29170 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29171 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29172 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29173 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29174 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29175 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
29176 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
29177 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29178 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29179 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29180 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
29181 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
29182 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29183 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
29184 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29185 // CHECK20:       cond.true:
29186 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29187 // CHECK20:       cond.false:
29188 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29189 // CHECK20-NEXT:    br label [[COND_END]]
29190 // CHECK20:       cond.end:
29191 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
29192 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
29193 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
29194 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
29195 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29196 // CHECK20:       omp.inner.for.cond:
29197 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
29198 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
29199 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
29200 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29201 // CHECK20:       omp.inner.for.body:
29202 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47
29203 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47
29204 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47
29205 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29206 // CHECK20:       omp.inner.for.inc:
29207 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
29208 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !47
29209 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
29210 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
29211 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
29212 // CHECK20:       omp.inner.for.end:
29213 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
29214 // CHECK20:       omp.loop.exit:
29215 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
29216 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29217 // CHECK20-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
29218 // CHECK20-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29219 // CHECK20:       .omp.final.then:
29220 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29221 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29222 // CHECK20:       .omp.final.done:
29223 // CHECK20-NEXT:    ret void
29224 //
29225 //
29226 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15
29227 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29228 // CHECK20-NEXT:  entry:
29229 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29230 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29231 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
29232 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
29233 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29234 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29235 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29236 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29237 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29238 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29239 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29240 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29241 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29242 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29243 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29244 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29245 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29246 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29247 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29248 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29249 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29250 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29251 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
29252 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29253 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29254 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29255 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29256 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
29257 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
29258 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29259 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
29260 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29261 // CHECK20:       cond.true:
29262 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29263 // CHECK20:       cond.false:
29264 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29265 // CHECK20-NEXT:    br label [[COND_END]]
29266 // CHECK20:       cond.end:
29267 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
29268 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
29269 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29270 // CHECK20-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
29271 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29272 // CHECK20:       omp.inner.for.cond:
29273 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
29274 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !50
29275 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
29276 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29277 // CHECK20:       omp.inner.for.body:
29278 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
29279 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
29280 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29281 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !50
29282 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !50
29283 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
29284 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !50
29285 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29286 // CHECK20:       omp.body.continue:
29287 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29288 // CHECK20:       omp.inner.for.inc:
29289 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
29290 // CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
29291 // CHECK20-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
29292 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
29293 // CHECK20:       omp.inner.for.end:
29294 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
29295 // CHECK20:       omp.loop.exit:
29296 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
29297 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29298 // CHECK20-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
29299 // CHECK20-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29300 // CHECK20:       .omp.final.then:
29301 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29302 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29303 // CHECK20:       .omp.final.done:
29304 // CHECK20-NEXT:    ret void
29305 //
29306 //
29307 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
29308 // CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29309 // CHECK20-NEXT:  entry:
29310 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29311 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29312 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29313 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
29314 // CHECK20-NEXT:    ret void
29315 //
29316 //
29317 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..17
29318 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29319 // CHECK20-NEXT:  entry:
29320 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29321 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29322 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29323 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29324 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29325 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
29326 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
29327 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29328 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29329 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29330 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29331 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29332 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29333 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29334 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
29335 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
29336 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29337 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29338 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29339 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
29340 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
29341 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29342 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
29343 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29344 // CHECK20:       cond.true:
29345 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29346 // CHECK20:       cond.false:
29347 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29348 // CHECK20-NEXT:    br label [[COND_END]]
29349 // CHECK20:       cond.end:
29350 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
29351 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
29352 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
29353 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
29354 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29355 // CHECK20:       omp.inner.for.cond:
29356 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
29357 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
29358 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
29359 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29360 // CHECK20:       omp.inner.for.body:
29361 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53
29362 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53
29363 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53
29364 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29365 // CHECK20:       omp.inner.for.inc:
29366 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
29367 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !53
29368 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
29369 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
29370 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
29371 // CHECK20:       omp.inner.for.end:
29372 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
29373 // CHECK20:       omp.loop.exit:
29374 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
29375 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29376 // CHECK20-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
29377 // CHECK20-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29378 // CHECK20:       .omp.final.then:
29379 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29380 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29381 // CHECK20:       .omp.final.done:
29382 // CHECK20-NEXT:    ret void
29383 //
29384 //
29385 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18
29386 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29387 // CHECK20-NEXT:  entry:
29388 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29389 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29390 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
29391 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
29392 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29393 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29394 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29395 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29396 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29397 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29398 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29399 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29400 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29401 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29402 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29403 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29404 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29405 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29406 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29407 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29408 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29409 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29410 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
29411 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29412 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29413 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29414 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29415 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
29416 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
29417 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29418 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
29419 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29420 // CHECK20:       cond.true:
29421 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29422 // CHECK20:       cond.false:
29423 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29424 // CHECK20-NEXT:    br label [[COND_END]]
29425 // CHECK20:       cond.end:
29426 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
29427 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
29428 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29429 // CHECK20-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
29430 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29431 // CHECK20:       omp.inner.for.cond:
29432 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
29433 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !56
29434 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
29435 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29436 // CHECK20:       omp.inner.for.body:
29437 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
29438 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
29439 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29440 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !56
29441 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !56
29442 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
29443 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !56
29444 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29445 // CHECK20:       omp.body.continue:
29446 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29447 // CHECK20:       omp.inner.for.inc:
29448 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
29449 // CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
29450 // CHECK20-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
29451 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
29452 // CHECK20:       omp.inner.for.end:
29453 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
29454 // CHECK20:       omp.loop.exit:
29455 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
29456 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29457 // CHECK20-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
29458 // CHECK20-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29459 // CHECK20:       .omp.final.then:
29460 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29461 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29462 // CHECK20:       .omp.final.done:
29463 // CHECK20-NEXT:    ret void
29464 //
29465 //
29466 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
29467 // CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
29468 // CHECK20-NEXT:  entry:
29469 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29470 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
29471 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
29472 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29473 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29474 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29475 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29476 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
29477 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
29478 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
29479 // CHECK20-NEXT:    ret void
29480 //
29481 //
29482 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..21
29483 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
29484 // CHECK20-NEXT:  entry:
29485 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29486 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29487 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29488 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
29489 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29490 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29491 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
29492 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
29493 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29494 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29495 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29496 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
29497 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29498 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29499 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29500 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29501 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29502 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
29503 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
29504 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29505 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29506 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29507 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
29508 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
29509 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29510 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
29511 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29512 // CHECK20:       cond.true:
29513 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29514 // CHECK20:       cond.false:
29515 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29516 // CHECK20-NEXT:    br label [[COND_END]]
29517 // CHECK20:       cond.end:
29518 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
29519 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
29520 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
29521 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
29522 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29523 // CHECK20:       omp.inner.for.cond:
29524 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
29525 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
29526 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
29527 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29528 // CHECK20:       omp.inner.for.body:
29529 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !59
29530 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59
29531 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59
29532 // CHECK20-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
29533 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59
29534 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59
29535 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29536 // CHECK20:       omp.inner.for.inc:
29537 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
29538 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !59
29539 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
29540 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
29541 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
29542 // CHECK20:       omp.inner.for.end:
29543 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
29544 // CHECK20:       omp.loop.exit:
29545 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
29546 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29547 // CHECK20-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
29548 // CHECK20-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29549 // CHECK20:       .omp.final.then:
29550 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29551 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29552 // CHECK20:       .omp.final.done:
29553 // CHECK20-NEXT:    ret void
29554 //
29555 //
29556 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22
29557 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
29558 // CHECK20-NEXT:  entry:
29559 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29560 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29561 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
29562 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
29563 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29564 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
29565 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29566 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29567 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29568 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29569 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29570 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29571 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29572 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29573 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29574 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29575 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29576 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29577 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29578 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29579 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29580 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29581 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29582 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29583 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
29584 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29585 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29586 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29587 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29588 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29589 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
29590 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
29591 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
29592 // CHECK20:       omp.dispatch.cond:
29593 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29594 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29595 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
29596 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29597 // CHECK20:       cond.true:
29598 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29599 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29600 // CHECK20:       cond.false:
29601 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29602 // CHECK20-NEXT:    br label [[COND_END]]
29603 // CHECK20:       cond.end:
29604 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
29605 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
29606 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29607 // CHECK20-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
29608 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
29609 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29610 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
29611 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
29612 // CHECK20:       omp.dispatch.body:
29613 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29614 // CHECK20:       omp.inner.for.cond:
29615 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
29616 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !62
29617 // CHECK20-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
29618 // CHECK20-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29619 // CHECK20:       omp.inner.for.body:
29620 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
29621 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
29622 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29623 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !62
29624 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !62
29625 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
29626 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !62
29627 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29628 // CHECK20:       omp.body.continue:
29629 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29630 // CHECK20:       omp.inner.for.inc:
29631 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
29632 // CHECK20-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
29633 // CHECK20-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
29634 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
29635 // CHECK20:       omp.inner.for.end:
29636 // CHECK20-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
29637 // CHECK20:       omp.dispatch.inc:
29638 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29639 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
29640 // CHECK20-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
29641 // CHECK20-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
29642 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29643 // CHECK20-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
29644 // CHECK20-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
29645 // CHECK20-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
29646 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND]]
29647 // CHECK20:       omp.dispatch.end:
29648 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
29649 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29650 // CHECK20-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
29651 // CHECK20-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29652 // CHECK20:       .omp.final.then:
29653 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29654 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29655 // CHECK20:       .omp.final.done:
29656 // CHECK20-NEXT:    ret void
29657 //
29658 //
29659 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
29660 // CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29661 // CHECK20-NEXT:  entry:
29662 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29663 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29664 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29665 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
29666 // CHECK20-NEXT:    ret void
29667 //
29668 //
29669 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..25
29670 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29671 // CHECK20-NEXT:  entry:
29672 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29673 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29674 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29675 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29676 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29677 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
29678 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
29679 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29680 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29681 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29682 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29683 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29684 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29685 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29686 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
29687 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
29688 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29689 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29690 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29691 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
29692 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
29693 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29694 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
29695 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29696 // CHECK20:       cond.true:
29697 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29698 // CHECK20:       cond.false:
29699 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29700 // CHECK20-NEXT:    br label [[COND_END]]
29701 // CHECK20:       cond.end:
29702 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
29703 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
29704 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
29705 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
29706 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29707 // CHECK20:       omp.inner.for.cond:
29708 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
29709 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
29710 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
29711 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29712 // CHECK20:       omp.inner.for.body:
29713 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65
29714 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65
29715 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65
29716 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29717 // CHECK20:       omp.inner.for.inc:
29718 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
29719 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !65
29720 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
29721 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
29722 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
29723 // CHECK20:       omp.inner.for.end:
29724 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
29725 // CHECK20:       omp.loop.exit:
29726 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
29727 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29728 // CHECK20-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
29729 // CHECK20-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29730 // CHECK20:       .omp.final.then:
29731 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29732 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29733 // CHECK20:       .omp.final.done:
29734 // CHECK20-NEXT:    ret void
29735 //
29736 //
29737 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26
29738 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
29739 // CHECK20-NEXT:  entry:
29740 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29741 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29742 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
29743 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
29744 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29745 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29746 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29747 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29748 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29749 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29750 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29751 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29752 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29753 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29754 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29755 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29756 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29757 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29758 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29759 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29760 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29761 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29762 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
29763 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29764 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29765 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29766 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29767 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29768 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29769 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
29770 // CHECK20-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
29771 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
29772 // CHECK20:       omp.dispatch.cond:
29773 // CHECK20-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
29774 // CHECK20-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
29775 // CHECK20-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
29776 // CHECK20:       omp.dispatch.body:
29777 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29778 // CHECK20-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
29779 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29780 // CHECK20:       omp.inner.for.cond:
29781 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
29782 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !68
29783 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
29784 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29785 // CHECK20:       omp.inner.for.body:
29786 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
29787 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
29788 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29789 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !68
29790 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !68
29791 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
29792 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !68
29793 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29794 // CHECK20:       omp.body.continue:
29795 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29796 // CHECK20:       omp.inner.for.inc:
29797 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
29798 // CHECK20-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
29799 // CHECK20-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
29800 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
29801 // CHECK20:       omp.inner.for.end:
29802 // CHECK20-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
29803 // CHECK20:       omp.dispatch.inc:
29804 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND]]
29805 // CHECK20:       omp.dispatch.end:
29806 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29807 // CHECK20-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
29808 // CHECK20-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29809 // CHECK20:       .omp.final.then:
29810 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29811 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29812 // CHECK20:       .omp.final.done:
29813 // CHECK20-NEXT:    ret void
29814 //
29815 //
29816 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
29817 // CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
29818 // CHECK20-NEXT:  entry:
29819 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29820 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
29821 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
29822 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29823 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29824 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29825 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29826 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
29827 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
29828 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
29829 // CHECK20-NEXT:    ret void
29830 //
29831 //
29832 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..29
29833 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
29834 // CHECK20-NEXT:  entry:
29835 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29836 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29837 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29838 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
29839 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29840 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29841 // CHECK20-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
29842 // CHECK20-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
29843 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29844 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29845 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29846 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
29847 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29848 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29849 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29850 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29851 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29852 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
29853 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
29854 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29855 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29856 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29857 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
29858 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
29859 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29860 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
29861 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
29862 // CHECK20:       cond.true:
29863 // CHECK20-NEXT:    br label [[COND_END:%.*]]
29864 // CHECK20:       cond.false:
29865 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
29866 // CHECK20-NEXT:    br label [[COND_END]]
29867 // CHECK20:       cond.end:
29868 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
29869 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
29870 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
29871 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
29872 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29873 // CHECK20:       omp.inner.for.cond:
29874 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
29875 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
29876 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
29877 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29878 // CHECK20:       omp.inner.for.body:
29879 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !71
29880 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !71
29881 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71
29882 // CHECK20-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
29883 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71
29884 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71
29885 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29886 // CHECK20:       omp.inner.for.inc:
29887 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
29888 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !71
29889 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
29890 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
29891 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
29892 // CHECK20:       omp.inner.for.end:
29893 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
29894 // CHECK20:       omp.loop.exit:
29895 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
29896 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29897 // CHECK20-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
29898 // CHECK20-NEXT:    br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29899 // CHECK20:       .omp.final.then:
29900 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29901 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29902 // CHECK20:       .omp.final.done:
29903 // CHECK20-NEXT:    ret void
29904 //
29905 //
29906 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30
29907 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
29908 // CHECK20-NEXT:  entry:
29909 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
29910 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
29911 // CHECK20-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
29912 // CHECK20-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
29913 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
29914 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
29915 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29916 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29917 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29918 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29919 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
29920 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
29921 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
29922 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
29923 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
29924 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29925 // CHECK20-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29926 // CHECK20-NEXT:    store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
29927 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29928 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
29929 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29930 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29931 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
29932 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
29933 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
29934 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29935 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
29936 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
29937 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
29938 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29939 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29940 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
29941 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
29942 // CHECK20-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
29943 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
29944 // CHECK20:       omp.dispatch.cond:
29945 // CHECK20-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
29946 // CHECK20-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
29947 // CHECK20-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
29948 // CHECK20:       omp.dispatch.body:
29949 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29950 // CHECK20-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
29951 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29952 // CHECK20:       omp.inner.for.cond:
29953 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
29954 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !74
29955 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
29956 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29957 // CHECK20:       omp.inner.for.body:
29958 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
29959 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
29960 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29961 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !74
29962 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !74
29963 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
29964 // CHECK20-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !74
29965 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29966 // CHECK20:       omp.body.continue:
29967 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29968 // CHECK20:       omp.inner.for.inc:
29969 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
29970 // CHECK20-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
29971 // CHECK20-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
29972 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
29973 // CHECK20:       omp.inner.for.end:
29974 // CHECK20-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
29975 // CHECK20:       omp.dispatch.inc:
29976 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND]]
29977 // CHECK20:       omp.dispatch.end:
29978 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
29979 // CHECK20-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
29980 // CHECK20-NEXT:    br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
29981 // CHECK20:       .omp.final.then:
29982 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
29983 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
29984 // CHECK20:       .omp.final.done:
29985 // CHECK20-NEXT:    ret void
29986 //
29987 //
29988 // CHECK20-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
29989 // CHECK20-SAME: () #[[ATTR5:[0-9]+]] {
29990 // CHECK20-NEXT:  entry:
29991 // CHECK20-NEXT:    call void @__tgt_register_requires(i64 1)
29992 // CHECK20-NEXT:    ret void
29993 //
29994 //
29995 // CHECK21-LABEL: define {{[^@]+}}@main
29996 // CHECK21-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
29997 // CHECK21-NEXT:  entry:
29998 // CHECK21-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
29999 // CHECK21-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
30000 // CHECK21-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
30001 // CHECK21-NEXT:    [[N:%.*]] = alloca i32, align 4
30002 // CHECK21-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
30003 // CHECK21-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
30004 // CHECK21-NEXT:    [[M:%.*]] = alloca i32, align 4
30005 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30006 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
30007 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
30008 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30009 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30010 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
30011 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30012 // CHECK21-NEXT:    [[I3:%.*]] = alloca i32, align 4
30013 // CHECK21-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
30014 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
30015 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
30016 // CHECK21-NEXT:    [[DOTOMP_LB16:%.*]] = alloca i32, align 4
30017 // CHECK21-NEXT:    [[DOTOMP_UB17:%.*]] = alloca i32, align 4
30018 // CHECK21-NEXT:    [[I18:%.*]] = alloca i32, align 4
30019 // CHECK21-NEXT:    [[DOTOMP_IV21:%.*]] = alloca i32, align 4
30020 // CHECK21-NEXT:    [[I22:%.*]] = alloca i32, align 4
30021 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
30022 // CHECK21-NEXT:    [[_TMP40:%.*]] = alloca i32, align 4
30023 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
30024 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
30025 // CHECK21-NEXT:    [[DOTOMP_LB46:%.*]] = alloca i32, align 4
30026 // CHECK21-NEXT:    [[DOTOMP_UB47:%.*]] = alloca i32, align 4
30027 // CHECK21-NEXT:    [[I48:%.*]] = alloca i32, align 4
30028 // CHECK21-NEXT:    [[DOTOMP_IV51:%.*]] = alloca i32, align 4
30029 // CHECK21-NEXT:    [[I52:%.*]] = alloca i32, align 4
30030 // CHECK21-NEXT:    [[_TMP69:%.*]] = alloca i32, align 4
30031 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_70:%.*]] = alloca i32, align 4
30032 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_71:%.*]] = alloca i32, align 4
30033 // CHECK21-NEXT:    [[DOTOMP_LB75:%.*]] = alloca i32, align 4
30034 // CHECK21-NEXT:    [[DOTOMP_UB76:%.*]] = alloca i32, align 4
30035 // CHECK21-NEXT:    [[I77:%.*]] = alloca i32, align 4
30036 // CHECK21-NEXT:    [[DOTOMP_IV80:%.*]] = alloca i32, align 4
30037 // CHECK21-NEXT:    [[I81:%.*]] = alloca i32, align 4
30038 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
30039 // CHECK21-NEXT:    [[_TMP99:%.*]] = alloca i32, align 4
30040 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_100:%.*]] = alloca i32, align 4
30041 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_101:%.*]] = alloca i32, align 4
30042 // CHECK21-NEXT:    [[DOTOMP_LB105:%.*]] = alloca i32, align 4
30043 // CHECK21-NEXT:    [[DOTOMP_UB106:%.*]] = alloca i32, align 4
30044 // CHECK21-NEXT:    [[I107:%.*]] = alloca i32, align 4
30045 // CHECK21-NEXT:    [[DOTOMP_IV110:%.*]] = alloca i32, align 4
30046 // CHECK21-NEXT:    [[I111:%.*]] = alloca i32, align 4
30047 // CHECK21-NEXT:    store i32 0, i32* [[RETVAL]], align 4
30048 // CHECK21-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
30049 // CHECK21-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
30050 // CHECK21-NEXT:    store i32 100, i32* [[N]], align 4
30051 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
30052 // CHECK21-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
30053 // CHECK21-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
30054 // CHECK21-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
30055 // CHECK21-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
30056 // CHECK21-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
30057 // CHECK21-NEXT:    store i32 10, i32* [[M]], align 4
30058 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N]], align 4
30059 // CHECK21-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
30060 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30061 // CHECK21-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
30062 // CHECK21-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
30063 // CHECK21-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
30064 // CHECK21-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
30065 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30066 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
30067 // CHECK21-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
30068 // CHECK21-NEXT:    store i32 0, i32* [[I]], align 4
30069 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30070 // CHECK21-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
30071 // CHECK21-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
30072 // CHECK21:       simd.if.then:
30073 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30074 // CHECK21-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
30075 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30076 // CHECK21:       omp.inner.for.cond:
30077 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30078 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
30079 // CHECK21-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
30080 // CHECK21-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30081 // CHECK21:       omp.inner.for.body:
30082 // CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30083 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
30084 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30085 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
30086 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
30087 // CHECK21-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
30088 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM]]
30089 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2
30090 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30091 // CHECK21:       omp.body.continue:
30092 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30093 // CHECK21:       omp.inner.for.inc:
30094 // CHECK21-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30095 // CHECK21-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1
30096 // CHECK21-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30097 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
30098 // CHECK21:       omp.inner.for.end:
30099 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30100 // CHECK21-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0
30101 // CHECK21-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
30102 // CHECK21-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
30103 // CHECK21-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
30104 // CHECK21-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
30105 // CHECK21-NEXT:    br label [[SIMD_IF_END]]
30106 // CHECK21:       simd.if.end:
30107 // CHECK21-NEXT:    [[TMP14:%.*]] = load i32, i32* [[N]], align 4
30108 // CHECK21-NEXT:    store i32 [[TMP14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
30109 // CHECK21-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
30110 // CHECK21-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP15]], 0
30111 // CHECK21-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
30112 // CHECK21-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
30113 // CHECK21-NEXT:    store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4
30114 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB16]], align 4
30115 // CHECK21-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4
30116 // CHECK21-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_UB17]], align 4
30117 // CHECK21-NEXT:    store i32 0, i32* [[I18]], align 4
30118 // CHECK21-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
30119 // CHECK21-NEXT:    [[CMP19:%.*]] = icmp slt i32 0, [[TMP17]]
30120 // CHECK21-NEXT:    br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END38:%.*]]
30121 // CHECK21:       simd.if.then20:
30122 // CHECK21-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4
30123 // CHECK21-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV21]], align 4
30124 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND23:%.*]]
30125 // CHECK21:       omp.inner.for.cond23:
30126 // CHECK21-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30127 // CHECK21-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !6
30128 // CHECK21-NEXT:    [[CMP24:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
30129 // CHECK21-NEXT:    br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
30130 // CHECK21:       omp.inner.for.body25:
30131 // CHECK21-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30132 // CHECK21-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[TMP21]], 1
30133 // CHECK21-NEXT:    [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
30134 // CHECK21-NEXT:    store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !6
30135 // CHECK21-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !6
30136 // CHECK21-NEXT:    [[IDXPROM28:%.*]] = sext i32 [[TMP22]] to i64
30137 // CHECK21-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM28]]
30138 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !6
30139 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE30:%.*]]
30140 // CHECK21:       omp.body.continue30:
30141 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC31:%.*]]
30142 // CHECK21:       omp.inner.for.inc31:
30143 // CHECK21-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30144 // CHECK21-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP23]], 1
30145 // CHECK21-NEXT:    store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30146 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP7:![0-9]+]]
30147 // CHECK21:       omp.inner.for.end33:
30148 // CHECK21-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
30149 // CHECK21-NEXT:    [[SUB34:%.*]] = sub nsw i32 [[TMP24]], 0
30150 // CHECK21-NEXT:    [[DIV35:%.*]] = sdiv i32 [[SUB34]], 1
30151 // CHECK21-NEXT:    [[MUL36:%.*]] = mul nsw i32 [[DIV35]], 1
30152 // CHECK21-NEXT:    [[ADD37:%.*]] = add nsw i32 0, [[MUL36]]
30153 // CHECK21-NEXT:    store i32 [[ADD37]], i32* [[I22]], align 4
30154 // CHECK21-NEXT:    br label [[SIMD_IF_END38]]
30155 // CHECK21:       simd.if.end38:
30156 // CHECK21-NEXT:    [[TMP25:%.*]] = load i32, i32* [[M]], align 4
30157 // CHECK21-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_39]], align 4
30158 // CHECK21-NEXT:    [[TMP26:%.*]] = load i32, i32* [[N]], align 4
30159 // CHECK21-NEXT:    store i32 [[TMP26]], i32* [[DOTCAPTURE_EXPR_41]], align 4
30160 // CHECK21-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
30161 // CHECK21-NEXT:    [[SUB43:%.*]] = sub nsw i32 [[TMP27]], 0
30162 // CHECK21-NEXT:    [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1
30163 // CHECK21-NEXT:    [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1
30164 // CHECK21-NEXT:    store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4
30165 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB46]], align 4
30166 // CHECK21-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
30167 // CHECK21-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_UB47]], align 4
30168 // CHECK21-NEXT:    store i32 0, i32* [[I48]], align 4
30169 // CHECK21-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
30170 // CHECK21-NEXT:    [[CMP49:%.*]] = icmp slt i32 0, [[TMP29]]
30171 // CHECK21-NEXT:    br i1 [[CMP49]], label [[SIMD_IF_THEN50:%.*]], label [[SIMD_IF_END68:%.*]]
30172 // CHECK21:       simd.if.then50:
30173 // CHECK21-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_LB46]], align 4
30174 // CHECK21-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV51]], align 4
30175 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND53:%.*]]
30176 // CHECK21:       omp.inner.for.cond53:
30177 // CHECK21-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30178 // CHECK21-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_UB47]], align 4, !llvm.access.group !9
30179 // CHECK21-NEXT:    [[CMP54:%.*]] = icmp sle i32 [[TMP31]], [[TMP32]]
30180 // CHECK21-NEXT:    br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END63:%.*]]
30181 // CHECK21:       omp.inner.for.body55:
30182 // CHECK21-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30183 // CHECK21-NEXT:    [[MUL56:%.*]] = mul nsw i32 [[TMP33]], 1
30184 // CHECK21-NEXT:    [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
30185 // CHECK21-NEXT:    store i32 [[ADD57]], i32* [[I52]], align 4, !llvm.access.group !9
30186 // CHECK21-NEXT:    [[TMP34:%.*]] = load i32, i32* [[I52]], align 4, !llvm.access.group !9
30187 // CHECK21-NEXT:    [[IDXPROM58:%.*]] = sext i32 [[TMP34]] to i64
30188 // CHECK21-NEXT:    [[ARRAYIDX59:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM58]]
30189 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX59]], align 4, !llvm.access.group !9
30190 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE60:%.*]]
30191 // CHECK21:       omp.body.continue60:
30192 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC61:%.*]]
30193 // CHECK21:       omp.inner.for.inc61:
30194 // CHECK21-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30195 // CHECK21-NEXT:    [[ADD62:%.*]] = add nsw i32 [[TMP35]], 1
30196 // CHECK21-NEXT:    store i32 [[ADD62]], i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30197 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP10:![0-9]+]]
30198 // CHECK21:       omp.inner.for.end63:
30199 // CHECK21-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
30200 // CHECK21-NEXT:    [[SUB64:%.*]] = sub nsw i32 [[TMP36]], 0
30201 // CHECK21-NEXT:    [[DIV65:%.*]] = sdiv i32 [[SUB64]], 1
30202 // CHECK21-NEXT:    [[MUL66:%.*]] = mul nsw i32 [[DIV65]], 1
30203 // CHECK21-NEXT:    [[ADD67:%.*]] = add nsw i32 0, [[MUL66]]
30204 // CHECK21-NEXT:    store i32 [[ADD67]], i32* [[I52]], align 4
30205 // CHECK21-NEXT:    br label [[SIMD_IF_END68]]
30206 // CHECK21:       simd.if.end68:
30207 // CHECK21-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N]], align 4
30208 // CHECK21-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_70]], align 4
30209 // CHECK21-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_70]], align 4
30210 // CHECK21-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[TMP38]], 0
30211 // CHECK21-NEXT:    [[DIV73:%.*]] = sdiv i32 [[SUB72]], 1
30212 // CHECK21-NEXT:    [[SUB74:%.*]] = sub nsw i32 [[DIV73]], 1
30213 // CHECK21-NEXT:    store i32 [[SUB74]], i32* [[DOTCAPTURE_EXPR_71]], align 4
30214 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB75]], align 4
30215 // CHECK21-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_71]], align 4
30216 // CHECK21-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_UB76]], align 4
30217 // CHECK21-NEXT:    store i32 0, i32* [[I77]], align 4
30218 // CHECK21-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_70]], align 4
30219 // CHECK21-NEXT:    [[CMP78:%.*]] = icmp slt i32 0, [[TMP40]]
30220 // CHECK21-NEXT:    br i1 [[CMP78]], label [[SIMD_IF_THEN79:%.*]], label [[SIMD_IF_END97:%.*]]
30221 // CHECK21:       simd.if.then79:
30222 // CHECK21-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_LB75]], align 4
30223 // CHECK21-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV80]], align 4
30224 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND82:%.*]]
30225 // CHECK21:       omp.inner.for.cond82:
30226 // CHECK21-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30227 // CHECK21-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_UB76]], align 4, !llvm.access.group !12
30228 // CHECK21-NEXT:    [[CMP83:%.*]] = icmp sle i32 [[TMP42]], [[TMP43]]
30229 // CHECK21-NEXT:    br i1 [[CMP83]], label [[OMP_INNER_FOR_BODY84:%.*]], label [[OMP_INNER_FOR_END92:%.*]]
30230 // CHECK21:       omp.inner.for.body84:
30231 // CHECK21-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30232 // CHECK21-NEXT:    [[MUL85:%.*]] = mul nsw i32 [[TMP44]], 1
30233 // CHECK21-NEXT:    [[ADD86:%.*]] = add nsw i32 0, [[MUL85]]
30234 // CHECK21-NEXT:    store i32 [[ADD86]], i32* [[I81]], align 4, !llvm.access.group !12
30235 // CHECK21-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I81]], align 4, !llvm.access.group !12
30236 // CHECK21-NEXT:    [[IDXPROM87:%.*]] = sext i32 [[TMP45]] to i64
30237 // CHECK21-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM87]]
30238 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX88]], align 4, !llvm.access.group !12
30239 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE89:%.*]]
30240 // CHECK21:       omp.body.continue89:
30241 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC90:%.*]]
30242 // CHECK21:       omp.inner.for.inc90:
30243 // CHECK21-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30244 // CHECK21-NEXT:    [[ADD91:%.*]] = add nsw i32 [[TMP46]], 1
30245 // CHECK21-NEXT:    store i32 [[ADD91]], i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30246 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND82]], !llvm.loop [[LOOP13:![0-9]+]]
30247 // CHECK21:       omp.inner.for.end92:
30248 // CHECK21-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_70]], align 4
30249 // CHECK21-NEXT:    [[SUB93:%.*]] = sub nsw i32 [[TMP47]], 0
30250 // CHECK21-NEXT:    [[DIV94:%.*]] = sdiv i32 [[SUB93]], 1
30251 // CHECK21-NEXT:    [[MUL95:%.*]] = mul nsw i32 [[DIV94]], 1
30252 // CHECK21-NEXT:    [[ADD96:%.*]] = add nsw i32 0, [[MUL95]]
30253 // CHECK21-NEXT:    store i32 [[ADD96]], i32* [[I81]], align 4
30254 // CHECK21-NEXT:    br label [[SIMD_IF_END97]]
30255 // CHECK21:       simd.if.end97:
30256 // CHECK21-NEXT:    [[TMP48:%.*]] = load i32, i32* [[M]], align 4
30257 // CHECK21-NEXT:    store i32 [[TMP48]], i32* [[DOTCAPTURE_EXPR_98]], align 4
30258 // CHECK21-NEXT:    [[TMP49:%.*]] = load i32, i32* [[N]], align 4
30259 // CHECK21-NEXT:    store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_100]], align 4
30260 // CHECK21-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_100]], align 4
30261 // CHECK21-NEXT:    [[SUB102:%.*]] = sub nsw i32 [[TMP50]], 0
30262 // CHECK21-NEXT:    [[DIV103:%.*]] = sdiv i32 [[SUB102]], 1
30263 // CHECK21-NEXT:    [[SUB104:%.*]] = sub nsw i32 [[DIV103]], 1
30264 // CHECK21-NEXT:    store i32 [[SUB104]], i32* [[DOTCAPTURE_EXPR_101]], align 4
30265 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB105]], align 4
30266 // CHECK21-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_101]], align 4
30267 // CHECK21-NEXT:    store i32 [[TMP51]], i32* [[DOTOMP_UB106]], align 4
30268 // CHECK21-NEXT:    store i32 0, i32* [[I107]], align 4
30269 // CHECK21-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_100]], align 4
30270 // CHECK21-NEXT:    [[CMP108:%.*]] = icmp slt i32 0, [[TMP52]]
30271 // CHECK21-NEXT:    br i1 [[CMP108]], label [[SIMD_IF_THEN109:%.*]], label [[SIMD_IF_END127:%.*]]
30272 // CHECK21:       simd.if.then109:
30273 // CHECK21-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTOMP_LB105]], align 4
30274 // CHECK21-NEXT:    store i32 [[TMP53]], i32* [[DOTOMP_IV110]], align 4
30275 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND112:%.*]]
30276 // CHECK21:       omp.inner.for.cond112:
30277 // CHECK21-NEXT:    [[TMP54:%.*]] = load i32, i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30278 // CHECK21-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_UB106]], align 4, !llvm.access.group !15
30279 // CHECK21-NEXT:    [[CMP113:%.*]] = icmp sle i32 [[TMP54]], [[TMP55]]
30280 // CHECK21-NEXT:    br i1 [[CMP113]], label [[OMP_INNER_FOR_BODY114:%.*]], label [[OMP_INNER_FOR_END122:%.*]]
30281 // CHECK21:       omp.inner.for.body114:
30282 // CHECK21-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30283 // CHECK21-NEXT:    [[MUL115:%.*]] = mul nsw i32 [[TMP56]], 1
30284 // CHECK21-NEXT:    [[ADD116:%.*]] = add nsw i32 0, [[MUL115]]
30285 // CHECK21-NEXT:    store i32 [[ADD116]], i32* [[I111]], align 4, !llvm.access.group !15
30286 // CHECK21-NEXT:    [[TMP57:%.*]] = load i32, i32* [[I111]], align 4, !llvm.access.group !15
30287 // CHECK21-NEXT:    [[IDXPROM117:%.*]] = sext i32 [[TMP57]] to i64
30288 // CHECK21-NEXT:    [[ARRAYIDX118:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM117]]
30289 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX118]], align 4, !llvm.access.group !15
30290 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE119:%.*]]
30291 // CHECK21:       omp.body.continue119:
30292 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC120:%.*]]
30293 // CHECK21:       omp.inner.for.inc120:
30294 // CHECK21-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30295 // CHECK21-NEXT:    [[ADD121:%.*]] = add nsw i32 [[TMP58]], 1
30296 // CHECK21-NEXT:    store i32 [[ADD121]], i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30297 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND112]], !llvm.loop [[LOOP16:![0-9]+]]
30298 // CHECK21:       omp.inner.for.end122:
30299 // CHECK21-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_100]], align 4
30300 // CHECK21-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[TMP59]], 0
30301 // CHECK21-NEXT:    [[DIV124:%.*]] = sdiv i32 [[SUB123]], 1
30302 // CHECK21-NEXT:    [[MUL125:%.*]] = mul nsw i32 [[DIV124]], 1
30303 // CHECK21-NEXT:    [[ADD126:%.*]] = add nsw i32 0, [[MUL125]]
30304 // CHECK21-NEXT:    store i32 [[ADD126]], i32* [[I111]], align 4
30305 // CHECK21-NEXT:    br label [[SIMD_IF_END127]]
30306 // CHECK21:       simd.if.end127:
30307 // CHECK21-NEXT:    [[TMP60:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
30308 // CHECK21-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP60]])
30309 // CHECK21-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
30310 // CHECK21-NEXT:    [[TMP61:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
30311 // CHECK21-NEXT:    call void @llvm.stackrestore(i8* [[TMP61]])
30312 // CHECK21-NEXT:    [[TMP62:%.*]] = load i32, i32* [[RETVAL]], align 4
30313 // CHECK21-NEXT:    ret i32 [[TMP62]]
30314 //
30315 //
30316 // CHECK21-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
30317 // CHECK21-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
30318 // CHECK21-NEXT:  entry:
30319 // CHECK21-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
30320 // CHECK21-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
30321 // CHECK21-NEXT:    [[M:%.*]] = alloca i32, align 4
30322 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30323 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30324 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30325 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30326 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
30327 // CHECK21-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
30328 // CHECK21-NEXT:    [[DOTOMP_LB3:%.*]] = alloca i32, align 4
30329 // CHECK21-NEXT:    [[DOTOMP_UB4:%.*]] = alloca i32, align 4
30330 // CHECK21-NEXT:    [[DOTOMP_IV5:%.*]] = alloca i32, align 4
30331 // CHECK21-NEXT:    [[I6:%.*]] = alloca i32, align 4
30332 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
30333 // CHECK21-NEXT:    [[_TMP18:%.*]] = alloca i32, align 4
30334 // CHECK21-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
30335 // CHECK21-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
30336 // CHECK21-NEXT:    [[DOTOMP_IV21:%.*]] = alloca i32, align 4
30337 // CHECK21-NEXT:    [[I22:%.*]] = alloca i32, align 4
30338 // CHECK21-NEXT:    [[_TMP34:%.*]] = alloca i32, align 4
30339 // CHECK21-NEXT:    [[DOTOMP_LB35:%.*]] = alloca i32, align 4
30340 // CHECK21-NEXT:    [[DOTOMP_UB36:%.*]] = alloca i32, align 4
30341 // CHECK21-NEXT:    [[DOTOMP_IV37:%.*]] = alloca i32, align 4
30342 // CHECK21-NEXT:    [[I38:%.*]] = alloca i32, align 4
30343 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
30344 // CHECK21-NEXT:    [[_TMP51:%.*]] = alloca i32, align 4
30345 // CHECK21-NEXT:    [[DOTOMP_LB52:%.*]] = alloca i32, align 4
30346 // CHECK21-NEXT:    [[DOTOMP_UB53:%.*]] = alloca i32, align 4
30347 // CHECK21-NEXT:    [[DOTOMP_IV54:%.*]] = alloca i32, align 4
30348 // CHECK21-NEXT:    [[I55:%.*]] = alloca i32, align 4
30349 // CHECK21-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
30350 // CHECK21-NEXT:    store i32 10, i32* [[M]], align 4
30351 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30352 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
30353 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30354 // CHECK21-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
30355 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30356 // CHECK21:       omp.inner.for.cond:
30357 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30358 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
30359 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
30360 // CHECK21-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30361 // CHECK21:       omp.inner.for.body:
30362 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30363 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
30364 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30365 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18
30366 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !18
30367 // CHECK21-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
30368 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
30369 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
30370 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30371 // CHECK21:       omp.body.continue:
30372 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30373 // CHECK21:       omp.inner.for.inc:
30374 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30375 // CHECK21-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
30376 // CHECK21-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30377 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
30378 // CHECK21:       omp.inner.for.end:
30379 // CHECK21-NEXT:    store i32 10, i32* [[I]], align 4
30380 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB3]], align 4
30381 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB4]], align 4
30382 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4
30383 // CHECK21-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4
30384 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND7:%.*]]
30385 // CHECK21:       omp.inner.for.cond7:
30386 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30387 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !21
30388 // CHECK21-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
30389 // CHECK21-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
30390 // CHECK21:       omp.inner.for.body9:
30391 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30392 // CHECK21-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
30393 // CHECK21-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
30394 // CHECK21-NEXT:    store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !21
30395 // CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !21
30396 // CHECK21-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP10]] to i64
30397 // CHECK21-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM12]]
30398 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !21
30399 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE14:%.*]]
30400 // CHECK21:       omp.body.continue14:
30401 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC15:%.*]]
30402 // CHECK21:       omp.inner.for.inc15:
30403 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30404 // CHECK21-NEXT:    [[ADD16:%.*]] = add nsw i32 [[TMP11]], 1
30405 // CHECK21-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30406 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]]
30407 // CHECK21:       omp.inner.for.end17:
30408 // CHECK21-NEXT:    store i32 10, i32* [[I6]], align 4
30409 // CHECK21-NEXT:    [[TMP12:%.*]] = load i32, i32* [[M]], align 4
30410 // CHECK21-NEXT:    store i32 [[TMP12]], i32* [[DOTCAPTURE_EXPR_]], align 4
30411 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
30412 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB20]], align 4
30413 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
30414 // CHECK21-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV21]], align 4
30415 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND23:%.*]]
30416 // CHECK21:       omp.inner.for.cond23:
30417 // CHECK21-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30418 // CHECK21-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !24
30419 // CHECK21-NEXT:    [[CMP24:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
30420 // CHECK21-NEXT:    br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
30421 // CHECK21:       omp.inner.for.body25:
30422 // CHECK21-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30423 // CHECK21-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[TMP16]], 1
30424 // CHECK21-NEXT:    [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
30425 // CHECK21-NEXT:    store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !24
30426 // CHECK21-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !24
30427 // CHECK21-NEXT:    [[IDXPROM28:%.*]] = sext i32 [[TMP17]] to i64
30428 // CHECK21-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM28]]
30429 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !24
30430 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE30:%.*]]
30431 // CHECK21:       omp.body.continue30:
30432 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC31:%.*]]
30433 // CHECK21:       omp.inner.for.inc31:
30434 // CHECK21-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30435 // CHECK21-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP18]], 1
30436 // CHECK21-NEXT:    store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30437 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP25:![0-9]+]]
30438 // CHECK21:       omp.inner.for.end33:
30439 // CHECK21-NEXT:    store i32 10, i32* [[I22]], align 4
30440 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB35]], align 4
30441 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB36]], align 4
30442 // CHECK21-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB35]], align 4
30443 // CHECK21-NEXT:    store i32 [[TMP19]], i32* [[DOTOMP_IV37]], align 4
30444 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND39:%.*]]
30445 // CHECK21:       omp.inner.for.cond39:
30446 // CHECK21-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30447 // CHECK21-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB36]], align 4, !llvm.access.group !27
30448 // CHECK21-NEXT:    [[CMP40:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
30449 // CHECK21-NEXT:    br i1 [[CMP40]], label [[OMP_INNER_FOR_BODY41:%.*]], label [[OMP_INNER_FOR_END49:%.*]]
30450 // CHECK21:       omp.inner.for.body41:
30451 // CHECK21-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30452 // CHECK21-NEXT:    [[MUL42:%.*]] = mul nsw i32 [[TMP22]], 1
30453 // CHECK21-NEXT:    [[ADD43:%.*]] = add nsw i32 0, [[MUL42]]
30454 // CHECK21-NEXT:    store i32 [[ADD43]], i32* [[I38]], align 4, !llvm.access.group !27
30455 // CHECK21-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I38]], align 4, !llvm.access.group !27
30456 // CHECK21-NEXT:    [[IDXPROM44:%.*]] = sext i32 [[TMP23]] to i64
30457 // CHECK21-NEXT:    [[ARRAYIDX45:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM44]]
30458 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX45]], align 4, !llvm.access.group !27
30459 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE46:%.*]]
30460 // CHECK21:       omp.body.continue46:
30461 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC47:%.*]]
30462 // CHECK21:       omp.inner.for.inc47:
30463 // CHECK21-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30464 // CHECK21-NEXT:    [[ADD48:%.*]] = add nsw i32 [[TMP24]], 1
30465 // CHECK21-NEXT:    store i32 [[ADD48]], i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30466 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND39]], !llvm.loop [[LOOP28:![0-9]+]]
30467 // CHECK21:       omp.inner.for.end49:
30468 // CHECK21-NEXT:    store i32 10, i32* [[I38]], align 4
30469 // CHECK21-NEXT:    [[TMP25:%.*]] = load i32, i32* [[M]], align 4
30470 // CHECK21-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_50]], align 4
30471 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB52]], align 4
30472 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB53]], align 4
30473 // CHECK21-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB52]], align 4
30474 // CHECK21-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV54]], align 4
30475 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND56:%.*]]
30476 // CHECK21:       omp.inner.for.cond56:
30477 // CHECK21-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
30478 // CHECK21-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB53]], align 4, !llvm.access.group !30
30479 // CHECK21-NEXT:    [[CMP57:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
30480 // CHECK21-NEXT:    br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
30481 // CHECK21:       omp.inner.for.body58:
30482 // CHECK21-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
30483 // CHECK21-NEXT:    [[MUL59:%.*]] = mul nsw i32 [[TMP29]], 1
30484 // CHECK21-NEXT:    [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
30485 // CHECK21-NEXT:    store i32 [[ADD60]], i32* [[I55]], align 4, !llvm.access.group !30
30486 // CHECK21-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I55]], align 4, !llvm.access.group !30
30487 // CHECK21-NEXT:    [[IDXPROM61:%.*]] = sext i32 [[TMP30]] to i64
30488 // CHECK21-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM61]]
30489 // CHECK21-NEXT:    store i32 0, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !30
30490 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE63:%.*]]
30491 // CHECK21:       omp.body.continue63:
30492 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC64:%.*]]
30493 // CHECK21:       omp.inner.for.inc64:
30494 // CHECK21-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
30495 // CHECK21-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP31]], 1
30496 // CHECK21-NEXT:    store i32 [[ADD65]], i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
30497 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP31:![0-9]+]]
30498 // CHECK21:       omp.inner.for.end66:
30499 // CHECK21-NEXT:    store i32 10, i32* [[I55]], align 4
30500 // CHECK21-NEXT:    ret i32 0
30501 //
30502 //
30503 // CHECK22-LABEL: define {{[^@]+}}@main
30504 // CHECK22-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
30505 // CHECK22-NEXT:  entry:
30506 // CHECK22-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
30507 // CHECK22-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
30508 // CHECK22-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
30509 // CHECK22-NEXT:    [[N:%.*]] = alloca i32, align 4
30510 // CHECK22-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
30511 // CHECK22-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
30512 // CHECK22-NEXT:    [[M:%.*]] = alloca i32, align 4
30513 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30514 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
30515 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
30516 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30517 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30518 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
30519 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30520 // CHECK22-NEXT:    [[I3:%.*]] = alloca i32, align 4
30521 // CHECK22-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
30522 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
30523 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
30524 // CHECK22-NEXT:    [[DOTOMP_LB16:%.*]] = alloca i32, align 4
30525 // CHECK22-NEXT:    [[DOTOMP_UB17:%.*]] = alloca i32, align 4
30526 // CHECK22-NEXT:    [[I18:%.*]] = alloca i32, align 4
30527 // CHECK22-NEXT:    [[DOTOMP_IV21:%.*]] = alloca i32, align 4
30528 // CHECK22-NEXT:    [[I22:%.*]] = alloca i32, align 4
30529 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
30530 // CHECK22-NEXT:    [[_TMP40:%.*]] = alloca i32, align 4
30531 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
30532 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
30533 // CHECK22-NEXT:    [[DOTOMP_LB46:%.*]] = alloca i32, align 4
30534 // CHECK22-NEXT:    [[DOTOMP_UB47:%.*]] = alloca i32, align 4
30535 // CHECK22-NEXT:    [[I48:%.*]] = alloca i32, align 4
30536 // CHECK22-NEXT:    [[DOTOMP_IV51:%.*]] = alloca i32, align 4
30537 // CHECK22-NEXT:    [[I52:%.*]] = alloca i32, align 4
30538 // CHECK22-NEXT:    [[_TMP69:%.*]] = alloca i32, align 4
30539 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_70:%.*]] = alloca i32, align 4
30540 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_71:%.*]] = alloca i32, align 4
30541 // CHECK22-NEXT:    [[DOTOMP_LB75:%.*]] = alloca i32, align 4
30542 // CHECK22-NEXT:    [[DOTOMP_UB76:%.*]] = alloca i32, align 4
30543 // CHECK22-NEXT:    [[I77:%.*]] = alloca i32, align 4
30544 // CHECK22-NEXT:    [[DOTOMP_IV80:%.*]] = alloca i32, align 4
30545 // CHECK22-NEXT:    [[I81:%.*]] = alloca i32, align 4
30546 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
30547 // CHECK22-NEXT:    [[_TMP99:%.*]] = alloca i32, align 4
30548 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_100:%.*]] = alloca i32, align 4
30549 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_101:%.*]] = alloca i32, align 4
30550 // CHECK22-NEXT:    [[DOTOMP_LB105:%.*]] = alloca i32, align 4
30551 // CHECK22-NEXT:    [[DOTOMP_UB106:%.*]] = alloca i32, align 4
30552 // CHECK22-NEXT:    [[I107:%.*]] = alloca i32, align 4
30553 // CHECK22-NEXT:    [[DOTOMP_IV110:%.*]] = alloca i32, align 4
30554 // CHECK22-NEXT:    [[I111:%.*]] = alloca i32, align 4
30555 // CHECK22-NEXT:    store i32 0, i32* [[RETVAL]], align 4
30556 // CHECK22-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
30557 // CHECK22-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
30558 // CHECK22-NEXT:    store i32 100, i32* [[N]], align 4
30559 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
30560 // CHECK22-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
30561 // CHECK22-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
30562 // CHECK22-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
30563 // CHECK22-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
30564 // CHECK22-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
30565 // CHECK22-NEXT:    store i32 10, i32* [[M]], align 4
30566 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N]], align 4
30567 // CHECK22-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
30568 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30569 // CHECK22-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
30570 // CHECK22-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
30571 // CHECK22-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
30572 // CHECK22-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
30573 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30574 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
30575 // CHECK22-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
30576 // CHECK22-NEXT:    store i32 0, i32* [[I]], align 4
30577 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30578 // CHECK22-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
30579 // CHECK22-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
30580 // CHECK22:       simd.if.then:
30581 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30582 // CHECK22-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
30583 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30584 // CHECK22:       omp.inner.for.cond:
30585 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30586 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
30587 // CHECK22-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
30588 // CHECK22-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30589 // CHECK22:       omp.inner.for.body:
30590 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30591 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
30592 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30593 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
30594 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
30595 // CHECK22-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
30596 // CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM]]
30597 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2
30598 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30599 // CHECK22:       omp.body.continue:
30600 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30601 // CHECK22:       omp.inner.for.inc:
30602 // CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30603 // CHECK22-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1
30604 // CHECK22-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
30605 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
30606 // CHECK22:       omp.inner.for.end:
30607 // CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30608 // CHECK22-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0
30609 // CHECK22-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
30610 // CHECK22-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
30611 // CHECK22-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
30612 // CHECK22-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
30613 // CHECK22-NEXT:    br label [[SIMD_IF_END]]
30614 // CHECK22:       simd.if.end:
30615 // CHECK22-NEXT:    [[TMP14:%.*]] = load i32, i32* [[N]], align 4
30616 // CHECK22-NEXT:    store i32 [[TMP14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
30617 // CHECK22-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
30618 // CHECK22-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP15]], 0
30619 // CHECK22-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
30620 // CHECK22-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
30621 // CHECK22-NEXT:    store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4
30622 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB16]], align 4
30623 // CHECK22-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4
30624 // CHECK22-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_UB17]], align 4
30625 // CHECK22-NEXT:    store i32 0, i32* [[I18]], align 4
30626 // CHECK22-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
30627 // CHECK22-NEXT:    [[CMP19:%.*]] = icmp slt i32 0, [[TMP17]]
30628 // CHECK22-NEXT:    br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END38:%.*]]
30629 // CHECK22:       simd.if.then20:
30630 // CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4
30631 // CHECK22-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV21]], align 4
30632 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND23:%.*]]
30633 // CHECK22:       omp.inner.for.cond23:
30634 // CHECK22-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30635 // CHECK22-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !6
30636 // CHECK22-NEXT:    [[CMP24:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
30637 // CHECK22-NEXT:    br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
30638 // CHECK22:       omp.inner.for.body25:
30639 // CHECK22-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30640 // CHECK22-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[TMP21]], 1
30641 // CHECK22-NEXT:    [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
30642 // CHECK22-NEXT:    store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !6
30643 // CHECK22-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !6
30644 // CHECK22-NEXT:    [[IDXPROM28:%.*]] = sext i32 [[TMP22]] to i64
30645 // CHECK22-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM28]]
30646 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !6
30647 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE30:%.*]]
30648 // CHECK22:       omp.body.continue30:
30649 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC31:%.*]]
30650 // CHECK22:       omp.inner.for.inc31:
30651 // CHECK22-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30652 // CHECK22-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP23]], 1
30653 // CHECK22-NEXT:    store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6
30654 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP7:![0-9]+]]
30655 // CHECK22:       omp.inner.for.end33:
30656 // CHECK22-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
30657 // CHECK22-NEXT:    [[SUB34:%.*]] = sub nsw i32 [[TMP24]], 0
30658 // CHECK22-NEXT:    [[DIV35:%.*]] = sdiv i32 [[SUB34]], 1
30659 // CHECK22-NEXT:    [[MUL36:%.*]] = mul nsw i32 [[DIV35]], 1
30660 // CHECK22-NEXT:    [[ADD37:%.*]] = add nsw i32 0, [[MUL36]]
30661 // CHECK22-NEXT:    store i32 [[ADD37]], i32* [[I22]], align 4
30662 // CHECK22-NEXT:    br label [[SIMD_IF_END38]]
30663 // CHECK22:       simd.if.end38:
30664 // CHECK22-NEXT:    [[TMP25:%.*]] = load i32, i32* [[M]], align 4
30665 // CHECK22-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_39]], align 4
30666 // CHECK22-NEXT:    [[TMP26:%.*]] = load i32, i32* [[N]], align 4
30667 // CHECK22-NEXT:    store i32 [[TMP26]], i32* [[DOTCAPTURE_EXPR_41]], align 4
30668 // CHECK22-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
30669 // CHECK22-NEXT:    [[SUB43:%.*]] = sub nsw i32 [[TMP27]], 0
30670 // CHECK22-NEXT:    [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1
30671 // CHECK22-NEXT:    [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1
30672 // CHECK22-NEXT:    store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4
30673 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB46]], align 4
30674 // CHECK22-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
30675 // CHECK22-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_UB47]], align 4
30676 // CHECK22-NEXT:    store i32 0, i32* [[I48]], align 4
30677 // CHECK22-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
30678 // CHECK22-NEXT:    [[CMP49:%.*]] = icmp slt i32 0, [[TMP29]]
30679 // CHECK22-NEXT:    br i1 [[CMP49]], label [[SIMD_IF_THEN50:%.*]], label [[SIMD_IF_END68:%.*]]
30680 // CHECK22:       simd.if.then50:
30681 // CHECK22-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_LB46]], align 4
30682 // CHECK22-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV51]], align 4
30683 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND53:%.*]]
30684 // CHECK22:       omp.inner.for.cond53:
30685 // CHECK22-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30686 // CHECK22-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_UB47]], align 4, !llvm.access.group !9
30687 // CHECK22-NEXT:    [[CMP54:%.*]] = icmp sle i32 [[TMP31]], [[TMP32]]
30688 // CHECK22-NEXT:    br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END63:%.*]]
30689 // CHECK22:       omp.inner.for.body55:
30690 // CHECK22-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30691 // CHECK22-NEXT:    [[MUL56:%.*]] = mul nsw i32 [[TMP33]], 1
30692 // CHECK22-NEXT:    [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
30693 // CHECK22-NEXT:    store i32 [[ADD57]], i32* [[I52]], align 4, !llvm.access.group !9
30694 // CHECK22-NEXT:    [[TMP34:%.*]] = load i32, i32* [[I52]], align 4, !llvm.access.group !9
30695 // CHECK22-NEXT:    [[IDXPROM58:%.*]] = sext i32 [[TMP34]] to i64
30696 // CHECK22-NEXT:    [[ARRAYIDX59:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM58]]
30697 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX59]], align 4, !llvm.access.group !9
30698 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE60:%.*]]
30699 // CHECK22:       omp.body.continue60:
30700 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC61:%.*]]
30701 // CHECK22:       omp.inner.for.inc61:
30702 // CHECK22-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30703 // CHECK22-NEXT:    [[ADD62:%.*]] = add nsw i32 [[TMP35]], 1
30704 // CHECK22-NEXT:    store i32 [[ADD62]], i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9
30705 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP10:![0-9]+]]
30706 // CHECK22:       omp.inner.for.end63:
30707 // CHECK22-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
30708 // CHECK22-NEXT:    [[SUB64:%.*]] = sub nsw i32 [[TMP36]], 0
30709 // CHECK22-NEXT:    [[DIV65:%.*]] = sdiv i32 [[SUB64]], 1
30710 // CHECK22-NEXT:    [[MUL66:%.*]] = mul nsw i32 [[DIV65]], 1
30711 // CHECK22-NEXT:    [[ADD67:%.*]] = add nsw i32 0, [[MUL66]]
30712 // CHECK22-NEXT:    store i32 [[ADD67]], i32* [[I52]], align 4
30713 // CHECK22-NEXT:    br label [[SIMD_IF_END68]]
30714 // CHECK22:       simd.if.end68:
30715 // CHECK22-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N]], align 4
30716 // CHECK22-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_70]], align 4
30717 // CHECK22-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_70]], align 4
30718 // CHECK22-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[TMP38]], 0
30719 // CHECK22-NEXT:    [[DIV73:%.*]] = sdiv i32 [[SUB72]], 1
30720 // CHECK22-NEXT:    [[SUB74:%.*]] = sub nsw i32 [[DIV73]], 1
30721 // CHECK22-NEXT:    store i32 [[SUB74]], i32* [[DOTCAPTURE_EXPR_71]], align 4
30722 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB75]], align 4
30723 // CHECK22-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_71]], align 4
30724 // CHECK22-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_UB76]], align 4
30725 // CHECK22-NEXT:    store i32 0, i32* [[I77]], align 4
30726 // CHECK22-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_70]], align 4
30727 // CHECK22-NEXT:    [[CMP78:%.*]] = icmp slt i32 0, [[TMP40]]
30728 // CHECK22-NEXT:    br i1 [[CMP78]], label [[SIMD_IF_THEN79:%.*]], label [[SIMD_IF_END97:%.*]]
30729 // CHECK22:       simd.if.then79:
30730 // CHECK22-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_LB75]], align 4
30731 // CHECK22-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV80]], align 4
30732 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND82:%.*]]
30733 // CHECK22:       omp.inner.for.cond82:
30734 // CHECK22-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30735 // CHECK22-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_UB76]], align 4, !llvm.access.group !12
30736 // CHECK22-NEXT:    [[CMP83:%.*]] = icmp sle i32 [[TMP42]], [[TMP43]]
30737 // CHECK22-NEXT:    br i1 [[CMP83]], label [[OMP_INNER_FOR_BODY84:%.*]], label [[OMP_INNER_FOR_END92:%.*]]
30738 // CHECK22:       omp.inner.for.body84:
30739 // CHECK22-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30740 // CHECK22-NEXT:    [[MUL85:%.*]] = mul nsw i32 [[TMP44]], 1
30741 // CHECK22-NEXT:    [[ADD86:%.*]] = add nsw i32 0, [[MUL85]]
30742 // CHECK22-NEXT:    store i32 [[ADD86]], i32* [[I81]], align 4, !llvm.access.group !12
30743 // CHECK22-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I81]], align 4, !llvm.access.group !12
30744 // CHECK22-NEXT:    [[IDXPROM87:%.*]] = sext i32 [[TMP45]] to i64
30745 // CHECK22-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM87]]
30746 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX88]], align 4, !llvm.access.group !12
30747 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE89:%.*]]
30748 // CHECK22:       omp.body.continue89:
30749 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC90:%.*]]
30750 // CHECK22:       omp.inner.for.inc90:
30751 // CHECK22-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30752 // CHECK22-NEXT:    [[ADD91:%.*]] = add nsw i32 [[TMP46]], 1
30753 // CHECK22-NEXT:    store i32 [[ADD91]], i32* [[DOTOMP_IV80]], align 4, !llvm.access.group !12
30754 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND82]], !llvm.loop [[LOOP13:![0-9]+]]
30755 // CHECK22:       omp.inner.for.end92:
30756 // CHECK22-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_70]], align 4
30757 // CHECK22-NEXT:    [[SUB93:%.*]] = sub nsw i32 [[TMP47]], 0
30758 // CHECK22-NEXT:    [[DIV94:%.*]] = sdiv i32 [[SUB93]], 1
30759 // CHECK22-NEXT:    [[MUL95:%.*]] = mul nsw i32 [[DIV94]], 1
30760 // CHECK22-NEXT:    [[ADD96:%.*]] = add nsw i32 0, [[MUL95]]
30761 // CHECK22-NEXT:    store i32 [[ADD96]], i32* [[I81]], align 4
30762 // CHECK22-NEXT:    br label [[SIMD_IF_END97]]
30763 // CHECK22:       simd.if.end97:
30764 // CHECK22-NEXT:    [[TMP48:%.*]] = load i32, i32* [[M]], align 4
30765 // CHECK22-NEXT:    store i32 [[TMP48]], i32* [[DOTCAPTURE_EXPR_98]], align 4
30766 // CHECK22-NEXT:    [[TMP49:%.*]] = load i32, i32* [[N]], align 4
30767 // CHECK22-NEXT:    store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_100]], align 4
30768 // CHECK22-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_100]], align 4
30769 // CHECK22-NEXT:    [[SUB102:%.*]] = sub nsw i32 [[TMP50]], 0
30770 // CHECK22-NEXT:    [[DIV103:%.*]] = sdiv i32 [[SUB102]], 1
30771 // CHECK22-NEXT:    [[SUB104:%.*]] = sub nsw i32 [[DIV103]], 1
30772 // CHECK22-NEXT:    store i32 [[SUB104]], i32* [[DOTCAPTURE_EXPR_101]], align 4
30773 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB105]], align 4
30774 // CHECK22-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_101]], align 4
30775 // CHECK22-NEXT:    store i32 [[TMP51]], i32* [[DOTOMP_UB106]], align 4
30776 // CHECK22-NEXT:    store i32 0, i32* [[I107]], align 4
30777 // CHECK22-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_100]], align 4
30778 // CHECK22-NEXT:    [[CMP108:%.*]] = icmp slt i32 0, [[TMP52]]
30779 // CHECK22-NEXT:    br i1 [[CMP108]], label [[SIMD_IF_THEN109:%.*]], label [[SIMD_IF_END127:%.*]]
30780 // CHECK22:       simd.if.then109:
30781 // CHECK22-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTOMP_LB105]], align 4
30782 // CHECK22-NEXT:    store i32 [[TMP53]], i32* [[DOTOMP_IV110]], align 4
30783 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND112:%.*]]
30784 // CHECK22:       omp.inner.for.cond112:
30785 // CHECK22-NEXT:    [[TMP54:%.*]] = load i32, i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30786 // CHECK22-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_UB106]], align 4, !llvm.access.group !15
30787 // CHECK22-NEXT:    [[CMP113:%.*]] = icmp sle i32 [[TMP54]], [[TMP55]]
30788 // CHECK22-NEXT:    br i1 [[CMP113]], label [[OMP_INNER_FOR_BODY114:%.*]], label [[OMP_INNER_FOR_END122:%.*]]
30789 // CHECK22:       omp.inner.for.body114:
30790 // CHECK22-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30791 // CHECK22-NEXT:    [[MUL115:%.*]] = mul nsw i32 [[TMP56]], 1
30792 // CHECK22-NEXT:    [[ADD116:%.*]] = add nsw i32 0, [[MUL115]]
30793 // CHECK22-NEXT:    store i32 [[ADD116]], i32* [[I111]], align 4, !llvm.access.group !15
30794 // CHECK22-NEXT:    [[TMP57:%.*]] = load i32, i32* [[I111]], align 4, !llvm.access.group !15
30795 // CHECK22-NEXT:    [[IDXPROM117:%.*]] = sext i32 [[TMP57]] to i64
30796 // CHECK22-NEXT:    [[ARRAYIDX118:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM117]]
30797 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX118]], align 4, !llvm.access.group !15
30798 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE119:%.*]]
30799 // CHECK22:       omp.body.continue119:
30800 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC120:%.*]]
30801 // CHECK22:       omp.inner.for.inc120:
30802 // CHECK22-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30803 // CHECK22-NEXT:    [[ADD121:%.*]] = add nsw i32 [[TMP58]], 1
30804 // CHECK22-NEXT:    store i32 [[ADD121]], i32* [[DOTOMP_IV110]], align 4, !llvm.access.group !15
30805 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND112]], !llvm.loop [[LOOP16:![0-9]+]]
30806 // CHECK22:       omp.inner.for.end122:
30807 // CHECK22-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_100]], align 4
30808 // CHECK22-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[TMP59]], 0
30809 // CHECK22-NEXT:    [[DIV124:%.*]] = sdiv i32 [[SUB123]], 1
30810 // CHECK22-NEXT:    [[MUL125:%.*]] = mul nsw i32 [[DIV124]], 1
30811 // CHECK22-NEXT:    [[ADD126:%.*]] = add nsw i32 0, [[MUL125]]
30812 // CHECK22-NEXT:    store i32 [[ADD126]], i32* [[I111]], align 4
30813 // CHECK22-NEXT:    br label [[SIMD_IF_END127]]
30814 // CHECK22:       simd.if.end127:
30815 // CHECK22-NEXT:    [[TMP60:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
30816 // CHECK22-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP60]])
30817 // CHECK22-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
30818 // CHECK22-NEXT:    [[TMP61:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
30819 // CHECK22-NEXT:    call void @llvm.stackrestore(i8* [[TMP61]])
30820 // CHECK22-NEXT:    [[TMP62:%.*]] = load i32, i32* [[RETVAL]], align 4
30821 // CHECK22-NEXT:    ret i32 [[TMP62]]
30822 //
30823 //
30824 // CHECK22-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
30825 // CHECK22-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
30826 // CHECK22-NEXT:  entry:
30827 // CHECK22-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
30828 // CHECK22-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
30829 // CHECK22-NEXT:    [[M:%.*]] = alloca i32, align 4
30830 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30831 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30832 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30833 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30834 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
30835 // CHECK22-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
30836 // CHECK22-NEXT:    [[DOTOMP_LB3:%.*]] = alloca i32, align 4
30837 // CHECK22-NEXT:    [[DOTOMP_UB4:%.*]] = alloca i32, align 4
30838 // CHECK22-NEXT:    [[DOTOMP_IV5:%.*]] = alloca i32, align 4
30839 // CHECK22-NEXT:    [[I6:%.*]] = alloca i32, align 4
30840 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
30841 // CHECK22-NEXT:    [[_TMP18:%.*]] = alloca i32, align 4
30842 // CHECK22-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
30843 // CHECK22-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
30844 // CHECK22-NEXT:    [[DOTOMP_IV21:%.*]] = alloca i32, align 4
30845 // CHECK22-NEXT:    [[I22:%.*]] = alloca i32, align 4
30846 // CHECK22-NEXT:    [[_TMP34:%.*]] = alloca i32, align 4
30847 // CHECK22-NEXT:    [[DOTOMP_LB35:%.*]] = alloca i32, align 4
30848 // CHECK22-NEXT:    [[DOTOMP_UB36:%.*]] = alloca i32, align 4
30849 // CHECK22-NEXT:    [[DOTOMP_IV37:%.*]] = alloca i32, align 4
30850 // CHECK22-NEXT:    [[I38:%.*]] = alloca i32, align 4
30851 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
30852 // CHECK22-NEXT:    [[_TMP51:%.*]] = alloca i32, align 4
30853 // CHECK22-NEXT:    [[DOTOMP_LB52:%.*]] = alloca i32, align 4
30854 // CHECK22-NEXT:    [[DOTOMP_UB53:%.*]] = alloca i32, align 4
30855 // CHECK22-NEXT:    [[DOTOMP_IV54:%.*]] = alloca i32, align 4
30856 // CHECK22-NEXT:    [[I55:%.*]] = alloca i32, align 4
30857 // CHECK22-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
30858 // CHECK22-NEXT:    store i32 10, i32* [[M]], align 4
30859 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30860 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
30861 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30862 // CHECK22-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
30863 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30864 // CHECK22:       omp.inner.for.cond:
30865 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30866 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
30867 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
30868 // CHECK22-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30869 // CHECK22:       omp.inner.for.body:
30870 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30871 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
30872 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30873 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18
30874 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !18
30875 // CHECK22-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
30876 // CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
30877 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
30878 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30879 // CHECK22:       omp.body.continue:
30880 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30881 // CHECK22:       omp.inner.for.inc:
30882 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30883 // CHECK22-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
30884 // CHECK22-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
30885 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
30886 // CHECK22:       omp.inner.for.end:
30887 // CHECK22-NEXT:    store i32 10, i32* [[I]], align 4
30888 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB3]], align 4
30889 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB4]], align 4
30890 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4
30891 // CHECK22-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4
30892 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND7:%.*]]
30893 // CHECK22:       omp.inner.for.cond7:
30894 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30895 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !21
30896 // CHECK22-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
30897 // CHECK22-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
30898 // CHECK22:       omp.inner.for.body9:
30899 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30900 // CHECK22-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
30901 // CHECK22-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
30902 // CHECK22-NEXT:    store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !21
30903 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !21
30904 // CHECK22-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP10]] to i64
30905 // CHECK22-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM12]]
30906 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !21
30907 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE14:%.*]]
30908 // CHECK22:       omp.body.continue14:
30909 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC15:%.*]]
30910 // CHECK22:       omp.inner.for.inc15:
30911 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30912 // CHECK22-NEXT:    [[ADD16:%.*]] = add nsw i32 [[TMP11]], 1
30913 // CHECK22-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21
30914 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]]
30915 // CHECK22:       omp.inner.for.end17:
30916 // CHECK22-NEXT:    store i32 10, i32* [[I6]], align 4
30917 // CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[M]], align 4
30918 // CHECK22-NEXT:    store i32 [[TMP12]], i32* [[DOTCAPTURE_EXPR_]], align 4
30919 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
30920 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB20]], align 4
30921 // CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
30922 // CHECK22-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV21]], align 4
30923 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND23:%.*]]
30924 // CHECK22:       omp.inner.for.cond23:
30925 // CHECK22-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30926 // CHECK22-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !24
30927 // CHECK22-NEXT:    [[CMP24:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
30928 // CHECK22-NEXT:    br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
30929 // CHECK22:       omp.inner.for.body25:
30930 // CHECK22-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30931 // CHECK22-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[TMP16]], 1
30932 // CHECK22-NEXT:    [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
30933 // CHECK22-NEXT:    store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !24
30934 // CHECK22-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !24
30935 // CHECK22-NEXT:    [[IDXPROM28:%.*]] = sext i32 [[TMP17]] to i64
30936 // CHECK22-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM28]]
30937 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !24
30938 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE30:%.*]]
30939 // CHECK22:       omp.body.continue30:
30940 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC31:%.*]]
30941 // CHECK22:       omp.inner.for.inc31:
30942 // CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30943 // CHECK22-NEXT:    [[ADD32:%.*]] = add nsw i32 [[TMP18]], 1
30944 // CHECK22-NEXT:    store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !24
30945 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP25:![0-9]+]]
30946 // CHECK22:       omp.inner.for.end33:
30947 // CHECK22-NEXT:    store i32 10, i32* [[I22]], align 4
30948 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB35]], align 4
30949 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB36]], align 4
30950 // CHECK22-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB35]], align 4
30951 // CHECK22-NEXT:    store i32 [[TMP19]], i32* [[DOTOMP_IV37]], align 4
30952 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND39:%.*]]
30953 // CHECK22:       omp.inner.for.cond39:
30954 // CHECK22-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30955 // CHECK22-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB36]], align 4, !llvm.access.group !27
30956 // CHECK22-NEXT:    [[CMP40:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
30957 // CHECK22-NEXT:    br i1 [[CMP40]], label [[OMP_INNER_FOR_BODY41:%.*]], label [[OMP_INNER_FOR_END49:%.*]]
30958 // CHECK22:       omp.inner.for.body41:
30959 // CHECK22-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30960 // CHECK22-NEXT:    [[MUL42:%.*]] = mul nsw i32 [[TMP22]], 1
30961 // CHECK22-NEXT:    [[ADD43:%.*]] = add nsw i32 0, [[MUL42]]
30962 // CHECK22-NEXT:    store i32 [[ADD43]], i32* [[I38]], align 4, !llvm.access.group !27
30963 // CHECK22-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I38]], align 4, !llvm.access.group !27
30964 // CHECK22-NEXT:    [[IDXPROM44:%.*]] = sext i32 [[TMP23]] to i64
30965 // CHECK22-NEXT:    [[ARRAYIDX45:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM44]]
30966 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX45]], align 4, !llvm.access.group !27
30967 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE46:%.*]]
30968 // CHECK22:       omp.body.continue46:
30969 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC47:%.*]]
30970 // CHECK22:       omp.inner.for.inc47:
30971 // CHECK22-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30972 // CHECK22-NEXT:    [[ADD48:%.*]] = add nsw i32 [[TMP24]], 1
30973 // CHECK22-NEXT:    store i32 [[ADD48]], i32* [[DOTOMP_IV37]], align 4, !llvm.access.group !27
30974 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND39]], !llvm.loop [[LOOP28:![0-9]+]]
30975 // CHECK22:       omp.inner.for.end49:
30976 // CHECK22-NEXT:    store i32 10, i32* [[I38]], align 4
30977 // CHECK22-NEXT:    [[TMP25:%.*]] = load i32, i32* [[M]], align 4
30978 // CHECK22-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_50]], align 4
30979 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB52]], align 4
30980 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB53]], align 4
30981 // CHECK22-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB52]], align 4
30982 // CHECK22-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV54]], align 4
30983 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND56:%.*]]
30984 // CHECK22:       omp.inner.for.cond56:
30985 // CHECK22-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
30986 // CHECK22-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB53]], align 4, !llvm.access.group !30
30987 // CHECK22-NEXT:    [[CMP57:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
30988 // CHECK22-NEXT:    br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
30989 // CHECK22:       omp.inner.for.body58:
30990 // CHECK22-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
30991 // CHECK22-NEXT:    [[MUL59:%.*]] = mul nsw i32 [[TMP29]], 1
30992 // CHECK22-NEXT:    [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
30993 // CHECK22-NEXT:    store i32 [[ADD60]], i32* [[I55]], align 4, !llvm.access.group !30
30994 // CHECK22-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I55]], align 4, !llvm.access.group !30
30995 // CHECK22-NEXT:    [[IDXPROM61:%.*]] = sext i32 [[TMP30]] to i64
30996 // CHECK22-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM61]]
30997 // CHECK22-NEXT:    store i32 0, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !30
30998 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE63:%.*]]
30999 // CHECK22:       omp.body.continue63:
31000 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC64:%.*]]
31001 // CHECK22:       omp.inner.for.inc64:
31002 // CHECK22-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
31003 // CHECK22-NEXT:    [[ADD65:%.*]] = add nsw i32 [[TMP31]], 1
31004 // CHECK22-NEXT:    store i32 [[ADD65]], i32* [[DOTOMP_IV54]], align 4, !llvm.access.group !30
31005 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP31:![0-9]+]]
31006 // CHECK22:       omp.inner.for.end66:
31007 // CHECK22-NEXT:    store i32 10, i32* [[I55]], align 4
31008 // CHECK22-NEXT:    ret i32 0
31009 //
31010 //
31011 // CHECK23-LABEL: define {{[^@]+}}@main
31012 // CHECK23-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
31013 // CHECK23-NEXT:  entry:
31014 // CHECK23-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
31015 // CHECK23-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
31016 // CHECK23-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 4
31017 // CHECK23-NEXT:    [[N:%.*]] = alloca i32, align 4
31018 // CHECK23-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
31019 // CHECK23-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
31020 // CHECK23-NEXT:    [[M:%.*]] = alloca i32, align 4
31021 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
31022 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
31023 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
31024 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
31025 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
31026 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
31027 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
31028 // CHECK23-NEXT:    [[I3:%.*]] = alloca i32, align 4
31029 // CHECK23-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
31030 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
31031 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
31032 // CHECK23-NEXT:    [[DOTOMP_LB16:%.*]] = alloca i32, align 4
31033 // CHECK23-NEXT:    [[DOTOMP_UB17:%.*]] = alloca i32, align 4
31034 // CHECK23-NEXT:    [[I18:%.*]] = alloca i32, align 4
31035 // CHECK23-NEXT:    [[DOTOMP_IV21:%.*]] = alloca i32, align 4
31036 // CHECK23-NEXT:    [[I22:%.*]] = alloca i32, align 4
31037 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
31038 // CHECK23-NEXT:    [[_TMP39:%.*]] = alloca i32, align 4
31039 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
31040 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
31041 // CHECK23-NEXT:    [[DOTOMP_LB45:%.*]] = alloca i32, align 4
31042 // CHECK23-NEXT:    [[DOTOMP_UB46:%.*]] = alloca i32, align 4
31043 // CHECK23-NEXT:    [[I47:%.*]] = alloca i32, align 4
31044 // CHECK23-NEXT:    [[DOTOMP_IV50:%.*]] = alloca i32, align 4
31045 // CHECK23-NEXT:    [[I51:%.*]] = alloca i32, align 4
31046 // CHECK23-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
31047 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
31048 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
31049 // CHECK23-NEXT:    [[DOTOMP_LB73:%.*]] = alloca i32, align 4
31050 // CHECK23-NEXT:    [[DOTOMP_UB74:%.*]] = alloca i32, align 4
31051 // CHECK23-NEXT:    [[I75:%.*]] = alloca i32, align 4
31052 // CHECK23-NEXT:    [[DOTOMP_IV78:%.*]] = alloca i32, align 4
31053 // CHECK23-NEXT:    [[I79:%.*]] = alloca i32, align 4
31054 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_95:%.*]] = alloca i32, align 4
31055 // CHECK23-NEXT:    [[_TMP96:%.*]] = alloca i32, align 4
31056 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_97:%.*]] = alloca i32, align 4
31057 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
31058 // CHECK23-NEXT:    [[DOTOMP_LB102:%.*]] = alloca i32, align 4
31059 // CHECK23-NEXT:    [[DOTOMP_UB103:%.*]] = alloca i32, align 4
31060 // CHECK23-NEXT:    [[I104:%.*]] = alloca i32, align 4
31061 // CHECK23-NEXT:    [[DOTOMP_IV107:%.*]] = alloca i32, align 4
31062 // CHECK23-NEXT:    [[I108:%.*]] = alloca i32, align 4
31063 // CHECK23-NEXT:    store i32 0, i32* [[RETVAL]], align 4
31064 // CHECK23-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
31065 // CHECK23-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
31066 // CHECK23-NEXT:    store i32 100, i32* [[N]], align 4
31067 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
31068 // CHECK23-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
31069 // CHECK23-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
31070 // CHECK23-NEXT:    [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
31071 // CHECK23-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
31072 // CHECK23-NEXT:    store i32 10, i32* [[M]], align 4
31073 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N]], align 4
31074 // CHECK23-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
31075 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
31076 // CHECK23-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
31077 // CHECK23-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
31078 // CHECK23-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
31079 // CHECK23-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
31080 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
31081 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
31082 // CHECK23-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
31083 // CHECK23-NEXT:    store i32 0, i32* [[I]], align 4
31084 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
31085 // CHECK23-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
31086 // CHECK23-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
31087 // CHECK23:       simd.if.then:
31088 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
31089 // CHECK23-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
31090 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
31091 // CHECK23:       omp.inner.for.cond:
31092 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31093 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
31094 // CHECK23-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
31095 // CHECK23-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
31096 // CHECK23:       omp.inner.for.body:
31097 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31098 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
31099 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
31100 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
31101 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
31102 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP10]]
31103 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3
31104 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
31105 // CHECK23:       omp.body.continue:
31106 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
31107 // CHECK23:       omp.inner.for.inc:
31108 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31109 // CHECK23-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
31110 // CHECK23-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31111 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
31112 // CHECK23:       omp.inner.for.end:
31113 // CHECK23-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
31114 // CHECK23-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0
31115 // CHECK23-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
31116 // CHECK23-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
31117 // CHECK23-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
31118 // CHECK23-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
31119 // CHECK23-NEXT:    br label [[SIMD_IF_END]]
31120 // CHECK23:       simd.if.end:
31121 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[N]], align 4
31122 // CHECK23-NEXT:    store i32 [[TMP13]], i32* [[DOTCAPTURE_EXPR_11]], align 4
31123 // CHECK23-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
31124 // CHECK23-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP14]], 0
31125 // CHECK23-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
31126 // CHECK23-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
31127 // CHECK23-NEXT:    store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4
31128 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB16]], align 4
31129 // CHECK23-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4
31130 // CHECK23-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_UB17]], align 4
31131 // CHECK23-NEXT:    store i32 0, i32* [[I18]], align 4
31132 // CHECK23-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
31133 // CHECK23-NEXT:    [[CMP19:%.*]] = icmp slt i32 0, [[TMP16]]
31134 // CHECK23-NEXT:    br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END37:%.*]]
31135 // CHECK23:       simd.if.then20:
31136 // CHECK23-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4
31137 // CHECK23-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV21]], align 4
31138 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND23:%.*]]
31139 // CHECK23:       omp.inner.for.cond23:
31140 // CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31141 // CHECK23-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !7
31142 // CHECK23-NEXT:    [[CMP24:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
31143 // CHECK23-NEXT:    br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END32:%.*]]
31144 // CHECK23:       omp.inner.for.body25:
31145 // CHECK23-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31146 // CHECK23-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[TMP20]], 1
31147 // CHECK23-NEXT:    [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
31148 // CHECK23-NEXT:    store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !7
31149 // CHECK23-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !7
31150 // CHECK23-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP21]]
31151 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX28]], align 4, !llvm.access.group !7
31152 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE29:%.*]]
31153 // CHECK23:       omp.body.continue29:
31154 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC30:%.*]]
31155 // CHECK23:       omp.inner.for.inc30:
31156 // CHECK23-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31157 // CHECK23-NEXT:    [[ADD31:%.*]] = add nsw i32 [[TMP22]], 1
31158 // CHECK23-NEXT:    store i32 [[ADD31]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31159 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP8:![0-9]+]]
31160 // CHECK23:       omp.inner.for.end32:
31161 // CHECK23-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
31162 // CHECK23-NEXT:    [[SUB33:%.*]] = sub nsw i32 [[TMP23]], 0
31163 // CHECK23-NEXT:    [[DIV34:%.*]] = sdiv i32 [[SUB33]], 1
31164 // CHECK23-NEXT:    [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 1
31165 // CHECK23-NEXT:    [[ADD36:%.*]] = add nsw i32 0, [[MUL35]]
31166 // CHECK23-NEXT:    store i32 [[ADD36]], i32* [[I22]], align 4
31167 // CHECK23-NEXT:    br label [[SIMD_IF_END37]]
31168 // CHECK23:       simd.if.end37:
31169 // CHECK23-NEXT:    [[TMP24:%.*]] = load i32, i32* [[M]], align 4
31170 // CHECK23-NEXT:    store i32 [[TMP24]], i32* [[DOTCAPTURE_EXPR_38]], align 4
31171 // CHECK23-NEXT:    [[TMP25:%.*]] = load i32, i32* [[N]], align 4
31172 // CHECK23-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_40]], align 4
31173 // CHECK23-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
31174 // CHECK23-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[TMP26]], 0
31175 // CHECK23-NEXT:    [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1
31176 // CHECK23-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1
31177 // CHECK23-NEXT:    store i32 [[SUB44]], i32* [[DOTCAPTURE_EXPR_41]], align 4
31178 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB45]], align 4
31179 // CHECK23-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
31180 // CHECK23-NEXT:    store i32 [[TMP27]], i32* [[DOTOMP_UB46]], align 4
31181 // CHECK23-NEXT:    store i32 0, i32* [[I47]], align 4
31182 // CHECK23-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
31183 // CHECK23-NEXT:    [[CMP48:%.*]] = icmp slt i32 0, [[TMP28]]
31184 // CHECK23-NEXT:    br i1 [[CMP48]], label [[SIMD_IF_THEN49:%.*]], label [[SIMD_IF_END66:%.*]]
31185 // CHECK23:       simd.if.then49:
31186 // CHECK23-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_LB45]], align 4
31187 // CHECK23-NEXT:    store i32 [[TMP29]], i32* [[DOTOMP_IV50]], align 4
31188 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND52:%.*]]
31189 // CHECK23:       omp.inner.for.cond52:
31190 // CHECK23-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31191 // CHECK23-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_UB46]], align 4, !llvm.access.group !10
31192 // CHECK23-NEXT:    [[CMP53:%.*]] = icmp sle i32 [[TMP30]], [[TMP31]]
31193 // CHECK23-NEXT:    br i1 [[CMP53]], label [[OMP_INNER_FOR_BODY54:%.*]], label [[OMP_INNER_FOR_END61:%.*]]
31194 // CHECK23:       omp.inner.for.body54:
31195 // CHECK23-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31196 // CHECK23-NEXT:    [[MUL55:%.*]] = mul nsw i32 [[TMP32]], 1
31197 // CHECK23-NEXT:    [[ADD56:%.*]] = add nsw i32 0, [[MUL55]]
31198 // CHECK23-NEXT:    store i32 [[ADD56]], i32* [[I51]], align 4, !llvm.access.group !10
31199 // CHECK23-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I51]], align 4, !llvm.access.group !10
31200 // CHECK23-NEXT:    [[ARRAYIDX57:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP33]]
31201 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX57]], align 4, !llvm.access.group !10
31202 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE58:%.*]]
31203 // CHECK23:       omp.body.continue58:
31204 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC59:%.*]]
31205 // CHECK23:       omp.inner.for.inc59:
31206 // CHECK23-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31207 // CHECK23-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP34]], 1
31208 // CHECK23-NEXT:    store i32 [[ADD60]], i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31209 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND52]], !llvm.loop [[LOOP11:![0-9]+]]
31210 // CHECK23:       omp.inner.for.end61:
31211 // CHECK23-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
31212 // CHECK23-NEXT:    [[SUB62:%.*]] = sub nsw i32 [[TMP35]], 0
31213 // CHECK23-NEXT:    [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
31214 // CHECK23-NEXT:    [[MUL64:%.*]] = mul nsw i32 [[DIV63]], 1
31215 // CHECK23-NEXT:    [[ADD65:%.*]] = add nsw i32 0, [[MUL64]]
31216 // CHECK23-NEXT:    store i32 [[ADD65]], i32* [[I51]], align 4
31217 // CHECK23-NEXT:    br label [[SIMD_IF_END66]]
31218 // CHECK23:       simd.if.end66:
31219 // CHECK23-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
31220 // CHECK23-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_68]], align 4
31221 // CHECK23-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
31222 // CHECK23-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP37]], 0
31223 // CHECK23-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
31224 // CHECK23-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
31225 // CHECK23-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
31226 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB73]], align 4
31227 // CHECK23-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
31228 // CHECK23-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB74]], align 4
31229 // CHECK23-NEXT:    store i32 0, i32* [[I75]], align 4
31230 // CHECK23-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
31231 // CHECK23-NEXT:    [[CMP76:%.*]] = icmp slt i32 0, [[TMP39]]
31232 // CHECK23-NEXT:    br i1 [[CMP76]], label [[SIMD_IF_THEN77:%.*]], label [[SIMD_IF_END94:%.*]]
31233 // CHECK23:       simd.if.then77:
31234 // CHECK23-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB73]], align 4
31235 // CHECK23-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV78]], align 4
31236 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND80:%.*]]
31237 // CHECK23:       omp.inner.for.cond80:
31238 // CHECK23-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31239 // CHECK23-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB74]], align 4, !llvm.access.group !13
31240 // CHECK23-NEXT:    [[CMP81:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
31241 // CHECK23-NEXT:    br i1 [[CMP81]], label [[OMP_INNER_FOR_BODY82:%.*]], label [[OMP_INNER_FOR_END89:%.*]]
31242 // CHECK23:       omp.inner.for.body82:
31243 // CHECK23-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31244 // CHECK23-NEXT:    [[MUL83:%.*]] = mul nsw i32 [[TMP43]], 1
31245 // CHECK23-NEXT:    [[ADD84:%.*]] = add nsw i32 0, [[MUL83]]
31246 // CHECK23-NEXT:    store i32 [[ADD84]], i32* [[I79]], align 4, !llvm.access.group !13
31247 // CHECK23-NEXT:    [[TMP44:%.*]] = load i32, i32* [[I79]], align 4, !llvm.access.group !13
31248 // CHECK23-NEXT:    [[ARRAYIDX85:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP44]]
31249 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX85]], align 4, !llvm.access.group !13
31250 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE86:%.*]]
31251 // CHECK23:       omp.body.continue86:
31252 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC87:%.*]]
31253 // CHECK23:       omp.inner.for.inc87:
31254 // CHECK23-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31255 // CHECK23-NEXT:    [[ADD88:%.*]] = add nsw i32 [[TMP45]], 1
31256 // CHECK23-NEXT:    store i32 [[ADD88]], i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31257 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND80]], !llvm.loop [[LOOP14:![0-9]+]]
31258 // CHECK23:       omp.inner.for.end89:
31259 // CHECK23-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
31260 // CHECK23-NEXT:    [[SUB90:%.*]] = sub nsw i32 [[TMP46]], 0
31261 // CHECK23-NEXT:    [[DIV91:%.*]] = sdiv i32 [[SUB90]], 1
31262 // CHECK23-NEXT:    [[MUL92:%.*]] = mul nsw i32 [[DIV91]], 1
31263 // CHECK23-NEXT:    [[ADD93:%.*]] = add nsw i32 0, [[MUL92]]
31264 // CHECK23-NEXT:    store i32 [[ADD93]], i32* [[I79]], align 4
31265 // CHECK23-NEXT:    br label [[SIMD_IF_END94]]
31266 // CHECK23:       simd.if.end94:
31267 // CHECK23-NEXT:    [[TMP47:%.*]] = load i32, i32* [[M]], align 4
31268 // CHECK23-NEXT:    store i32 [[TMP47]], i32* [[DOTCAPTURE_EXPR_95]], align 4
31269 // CHECK23-NEXT:    [[TMP48:%.*]] = load i32, i32* [[N]], align 4
31270 // CHECK23-NEXT:    store i32 [[TMP48]], i32* [[DOTCAPTURE_EXPR_97]], align 4
31271 // CHECK23-NEXT:    [[TMP49:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_97]], align 4
31272 // CHECK23-NEXT:    [[SUB99:%.*]] = sub nsw i32 [[TMP49]], 0
31273 // CHECK23-NEXT:    [[DIV100:%.*]] = sdiv i32 [[SUB99]], 1
31274 // CHECK23-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[DIV100]], 1
31275 // CHECK23-NEXT:    store i32 [[SUB101]], i32* [[DOTCAPTURE_EXPR_98]], align 4
31276 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB102]], align 4
31277 // CHECK23-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_98]], align 4
31278 // CHECK23-NEXT:    store i32 [[TMP50]], i32* [[DOTOMP_UB103]], align 4
31279 // CHECK23-NEXT:    store i32 0, i32* [[I104]], align 4
31280 // CHECK23-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_97]], align 4
31281 // CHECK23-NEXT:    [[CMP105:%.*]] = icmp slt i32 0, [[TMP51]]
31282 // CHECK23-NEXT:    br i1 [[CMP105]], label [[SIMD_IF_THEN106:%.*]], label [[SIMD_IF_END123:%.*]]
31283 // CHECK23:       simd.if.then106:
31284 // CHECK23-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_LB102]], align 4
31285 // CHECK23-NEXT:    store i32 [[TMP52]], i32* [[DOTOMP_IV107]], align 4
31286 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND109:%.*]]
31287 // CHECK23:       omp.inner.for.cond109:
31288 // CHECK23-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31289 // CHECK23-NEXT:    [[TMP54:%.*]] = load i32, i32* [[DOTOMP_UB103]], align 4, !llvm.access.group !16
31290 // CHECK23-NEXT:    [[CMP110:%.*]] = icmp sle i32 [[TMP53]], [[TMP54]]
31291 // CHECK23-NEXT:    br i1 [[CMP110]], label [[OMP_INNER_FOR_BODY111:%.*]], label [[OMP_INNER_FOR_END118:%.*]]
31292 // CHECK23:       omp.inner.for.body111:
31293 // CHECK23-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31294 // CHECK23-NEXT:    [[MUL112:%.*]] = mul nsw i32 [[TMP55]], 1
31295 // CHECK23-NEXT:    [[ADD113:%.*]] = add nsw i32 0, [[MUL112]]
31296 // CHECK23-NEXT:    store i32 [[ADD113]], i32* [[I108]], align 4, !llvm.access.group !16
31297 // CHECK23-NEXT:    [[TMP56:%.*]] = load i32, i32* [[I108]], align 4, !llvm.access.group !16
31298 // CHECK23-NEXT:    [[ARRAYIDX114:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP56]]
31299 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX114]], align 4, !llvm.access.group !16
31300 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE115:%.*]]
31301 // CHECK23:       omp.body.continue115:
31302 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC116:%.*]]
31303 // CHECK23:       omp.inner.for.inc116:
31304 // CHECK23-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31305 // CHECK23-NEXT:    [[ADD117:%.*]] = add nsw i32 [[TMP57]], 1
31306 // CHECK23-NEXT:    store i32 [[ADD117]], i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31307 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND109]], !llvm.loop [[LOOP17:![0-9]+]]
31308 // CHECK23:       omp.inner.for.end118:
31309 // CHECK23-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_97]], align 4
31310 // CHECK23-NEXT:    [[SUB119:%.*]] = sub nsw i32 [[TMP58]], 0
31311 // CHECK23-NEXT:    [[DIV120:%.*]] = sdiv i32 [[SUB119]], 1
31312 // CHECK23-NEXT:    [[MUL121:%.*]] = mul nsw i32 [[DIV120]], 1
31313 // CHECK23-NEXT:    [[ADD122:%.*]] = add nsw i32 0, [[MUL121]]
31314 // CHECK23-NEXT:    store i32 [[ADD122]], i32* [[I108]], align 4
31315 // CHECK23-NEXT:    br label [[SIMD_IF_END123]]
31316 // CHECK23:       simd.if.end123:
31317 // CHECK23-NEXT:    [[TMP59:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
31318 // CHECK23-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP59]])
31319 // CHECK23-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
31320 // CHECK23-NEXT:    [[TMP60:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
31321 // CHECK23-NEXT:    call void @llvm.stackrestore(i8* [[TMP60]])
31322 // CHECK23-NEXT:    [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4
31323 // CHECK23-NEXT:    ret i32 [[TMP61]]
31324 //
31325 //
31326 // CHECK23-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
31327 // CHECK23-SAME: (i32 [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
31328 // CHECK23-NEXT:  entry:
31329 // CHECK23-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
31330 // CHECK23-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
31331 // CHECK23-NEXT:    [[M:%.*]] = alloca i32, align 4
31332 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
31333 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
31334 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
31335 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
31336 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
31337 // CHECK23-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
31338 // CHECK23-NEXT:    [[DOTOMP_LB3:%.*]] = alloca i32, align 4
31339 // CHECK23-NEXT:    [[DOTOMP_UB4:%.*]] = alloca i32, align 4
31340 // CHECK23-NEXT:    [[DOTOMP_IV5:%.*]] = alloca i32, align 4
31341 // CHECK23-NEXT:    [[I6:%.*]] = alloca i32, align 4
31342 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
31343 // CHECK23-NEXT:    [[_TMP17:%.*]] = alloca i32, align 4
31344 // CHECK23-NEXT:    [[DOTOMP_LB18:%.*]] = alloca i32, align 4
31345 // CHECK23-NEXT:    [[DOTOMP_UB19:%.*]] = alloca i32, align 4
31346 // CHECK23-NEXT:    [[DOTOMP_IV20:%.*]] = alloca i32, align 4
31347 // CHECK23-NEXT:    [[I21:%.*]] = alloca i32, align 4
31348 // CHECK23-NEXT:    [[_TMP32:%.*]] = alloca i32, align 4
31349 // CHECK23-NEXT:    [[DOTOMP_LB33:%.*]] = alloca i32, align 4
31350 // CHECK23-NEXT:    [[DOTOMP_UB34:%.*]] = alloca i32, align 4
31351 // CHECK23-NEXT:    [[DOTOMP_IV35:%.*]] = alloca i32, align 4
31352 // CHECK23-NEXT:    [[I36:%.*]] = alloca i32, align 4
31353 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4
31354 // CHECK23-NEXT:    [[_TMP48:%.*]] = alloca i32, align 4
31355 // CHECK23-NEXT:    [[DOTOMP_LB49:%.*]] = alloca i32, align 4
31356 // CHECK23-NEXT:    [[DOTOMP_UB50:%.*]] = alloca i32, align 4
31357 // CHECK23-NEXT:    [[DOTOMP_IV51:%.*]] = alloca i32, align 4
31358 // CHECK23-NEXT:    [[I52:%.*]] = alloca i32, align 4
31359 // CHECK23-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
31360 // CHECK23-NEXT:    store i32 10, i32* [[M]], align 4
31361 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
31362 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
31363 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
31364 // CHECK23-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
31365 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
31366 // CHECK23:       omp.inner.for.cond:
31367 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31368 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
31369 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
31370 // CHECK23-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
31371 // CHECK23:       omp.inner.for.body:
31372 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31373 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
31374 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
31375 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19
31376 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !19
31377 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP4]]
31378 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19
31379 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
31380 // CHECK23:       omp.body.continue:
31381 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
31382 // CHECK23:       omp.inner.for.inc:
31383 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31384 // CHECK23-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
31385 // CHECK23-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31386 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
31387 // CHECK23:       omp.inner.for.end:
31388 // CHECK23-NEXT:    store i32 10, i32* [[I]], align 4
31389 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB3]], align 4
31390 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB4]], align 4
31391 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4
31392 // CHECK23-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4
31393 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND7:%.*]]
31394 // CHECK23:       omp.inner.for.cond7:
31395 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31396 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !22
31397 // CHECK23-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
31398 // CHECK23-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
31399 // CHECK23:       omp.inner.for.body9:
31400 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31401 // CHECK23-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
31402 // CHECK23-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
31403 // CHECK23-NEXT:    store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !22
31404 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !22
31405 // CHECK23-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP10]]
31406 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX12]], align 4, !llvm.access.group !22
31407 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE13:%.*]]
31408 // CHECK23:       omp.body.continue13:
31409 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC14:%.*]]
31410 // CHECK23:       omp.inner.for.inc14:
31411 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31412 // CHECK23-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP11]], 1
31413 // CHECK23-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31414 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]]
31415 // CHECK23:       omp.inner.for.end16:
31416 // CHECK23-NEXT:    store i32 10, i32* [[I6]], align 4
31417 // CHECK23-NEXT:    [[TMP12:%.*]] = load i32, i32* [[M]], align 4
31418 // CHECK23-NEXT:    store i32 [[TMP12]], i32* [[DOTCAPTURE_EXPR_]], align 4
31419 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB18]], align 4
31420 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB19]], align 4
31421 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB18]], align 4
31422 // CHECK23-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV20]], align 4
31423 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND22:%.*]]
31424 // CHECK23:       omp.inner.for.cond22:
31425 // CHECK23-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31426 // CHECK23-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB19]], align 4, !llvm.access.group !25
31427 // CHECK23-NEXT:    [[CMP23:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
31428 // CHECK23-NEXT:    br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
31429 // CHECK23:       omp.inner.for.body24:
31430 // CHECK23-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31431 // CHECK23-NEXT:    [[MUL25:%.*]] = mul nsw i32 [[TMP16]], 1
31432 // CHECK23-NEXT:    [[ADD26:%.*]] = add nsw i32 0, [[MUL25]]
31433 // CHECK23-NEXT:    store i32 [[ADD26]], i32* [[I21]], align 4, !llvm.access.group !25
31434 // CHECK23-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I21]], align 4, !llvm.access.group !25
31435 // CHECK23-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP17]]
31436 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX27]], align 4, !llvm.access.group !25
31437 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE28:%.*]]
31438 // CHECK23:       omp.body.continue28:
31439 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC29:%.*]]
31440 // CHECK23:       omp.inner.for.inc29:
31441 // CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31442 // CHECK23-NEXT:    [[ADD30:%.*]] = add nsw i32 [[TMP18]], 1
31443 // CHECK23-NEXT:    store i32 [[ADD30]], i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31444 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP26:![0-9]+]]
31445 // CHECK23:       omp.inner.for.end31:
31446 // CHECK23-NEXT:    store i32 10, i32* [[I21]], align 4
31447 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB33]], align 4
31448 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB34]], align 4
31449 // CHECK23-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB33]], align 4
31450 // CHECK23-NEXT:    store i32 [[TMP19]], i32* [[DOTOMP_IV35]], align 4
31451 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND37:%.*]]
31452 // CHECK23:       omp.inner.for.cond37:
31453 // CHECK23-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31454 // CHECK23-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB34]], align 4, !llvm.access.group !28
31455 // CHECK23-NEXT:    [[CMP38:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
31456 // CHECK23-NEXT:    br i1 [[CMP38]], label [[OMP_INNER_FOR_BODY39:%.*]], label [[OMP_INNER_FOR_END46:%.*]]
31457 // CHECK23:       omp.inner.for.body39:
31458 // CHECK23-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31459 // CHECK23-NEXT:    [[MUL40:%.*]] = mul nsw i32 [[TMP22]], 1
31460 // CHECK23-NEXT:    [[ADD41:%.*]] = add nsw i32 0, [[MUL40]]
31461 // CHECK23-NEXT:    store i32 [[ADD41]], i32* [[I36]], align 4, !llvm.access.group !28
31462 // CHECK23-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I36]], align 4, !llvm.access.group !28
31463 // CHECK23-NEXT:    [[ARRAYIDX42:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP23]]
31464 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX42]], align 4, !llvm.access.group !28
31465 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE43:%.*]]
31466 // CHECK23:       omp.body.continue43:
31467 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC44:%.*]]
31468 // CHECK23:       omp.inner.for.inc44:
31469 // CHECK23-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31470 // CHECK23-NEXT:    [[ADD45:%.*]] = add nsw i32 [[TMP24]], 1
31471 // CHECK23-NEXT:    store i32 [[ADD45]], i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31472 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND37]], !llvm.loop [[LOOP29:![0-9]+]]
31473 // CHECK23:       omp.inner.for.end46:
31474 // CHECK23-NEXT:    store i32 10, i32* [[I36]], align 4
31475 // CHECK23-NEXT:    [[TMP25:%.*]] = load i32, i32* [[M]], align 4
31476 // CHECK23-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_47]], align 4
31477 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB49]], align 4
31478 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB50]], align 4
31479 // CHECK23-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB49]], align 4
31480 // CHECK23-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV51]], align 4
31481 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND53:%.*]]
31482 // CHECK23:       omp.inner.for.cond53:
31483 // CHECK23-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31484 // CHECK23-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB50]], align 4, !llvm.access.group !31
31485 // CHECK23-NEXT:    [[CMP54:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
31486 // CHECK23-NEXT:    br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END62:%.*]]
31487 // CHECK23:       omp.inner.for.body55:
31488 // CHECK23-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31489 // CHECK23-NEXT:    [[MUL56:%.*]] = mul nsw i32 [[TMP29]], 1
31490 // CHECK23-NEXT:    [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
31491 // CHECK23-NEXT:    store i32 [[ADD57]], i32* [[I52]], align 4, !llvm.access.group !31
31492 // CHECK23-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I52]], align 4, !llvm.access.group !31
31493 // CHECK23-NEXT:    [[ARRAYIDX58:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP30]]
31494 // CHECK23-NEXT:    store i32 0, i32* [[ARRAYIDX58]], align 4, !llvm.access.group !31
31495 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE59:%.*]]
31496 // CHECK23:       omp.body.continue59:
31497 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC60:%.*]]
31498 // CHECK23:       omp.inner.for.inc60:
31499 // CHECK23-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31500 // CHECK23-NEXT:    [[ADD61:%.*]] = add nsw i32 [[TMP31]], 1
31501 // CHECK23-NEXT:    store i32 [[ADD61]], i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31502 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP32:![0-9]+]]
31503 // CHECK23:       omp.inner.for.end62:
31504 // CHECK23-NEXT:    store i32 10, i32* [[I52]], align 4
31505 // CHECK23-NEXT:    ret i32 0
31506 //
31507 //
31508 // CHECK24-LABEL: define {{[^@]+}}@main
31509 // CHECK24-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
31510 // CHECK24-NEXT:  entry:
31511 // CHECK24-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
31512 // CHECK24-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
31513 // CHECK24-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 4
31514 // CHECK24-NEXT:    [[N:%.*]] = alloca i32, align 4
31515 // CHECK24-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
31516 // CHECK24-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
31517 // CHECK24-NEXT:    [[M:%.*]] = alloca i32, align 4
31518 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
31519 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
31520 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
31521 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
31522 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
31523 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
31524 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
31525 // CHECK24-NEXT:    [[I3:%.*]] = alloca i32, align 4
31526 // CHECK24-NEXT:    [[_TMP10:%.*]] = alloca i32, align 4
31527 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
31528 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
31529 // CHECK24-NEXT:    [[DOTOMP_LB16:%.*]] = alloca i32, align 4
31530 // CHECK24-NEXT:    [[DOTOMP_UB17:%.*]] = alloca i32, align 4
31531 // CHECK24-NEXT:    [[I18:%.*]] = alloca i32, align 4
31532 // CHECK24-NEXT:    [[DOTOMP_IV21:%.*]] = alloca i32, align 4
31533 // CHECK24-NEXT:    [[I22:%.*]] = alloca i32, align 4
31534 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
31535 // CHECK24-NEXT:    [[_TMP39:%.*]] = alloca i32, align 4
31536 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
31537 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
31538 // CHECK24-NEXT:    [[DOTOMP_LB45:%.*]] = alloca i32, align 4
31539 // CHECK24-NEXT:    [[DOTOMP_UB46:%.*]] = alloca i32, align 4
31540 // CHECK24-NEXT:    [[I47:%.*]] = alloca i32, align 4
31541 // CHECK24-NEXT:    [[DOTOMP_IV50:%.*]] = alloca i32, align 4
31542 // CHECK24-NEXT:    [[I51:%.*]] = alloca i32, align 4
31543 // CHECK24-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
31544 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
31545 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
31546 // CHECK24-NEXT:    [[DOTOMP_LB73:%.*]] = alloca i32, align 4
31547 // CHECK24-NEXT:    [[DOTOMP_UB74:%.*]] = alloca i32, align 4
31548 // CHECK24-NEXT:    [[I75:%.*]] = alloca i32, align 4
31549 // CHECK24-NEXT:    [[DOTOMP_IV78:%.*]] = alloca i32, align 4
31550 // CHECK24-NEXT:    [[I79:%.*]] = alloca i32, align 4
31551 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_95:%.*]] = alloca i32, align 4
31552 // CHECK24-NEXT:    [[_TMP96:%.*]] = alloca i32, align 4
31553 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_97:%.*]] = alloca i32, align 4
31554 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
31555 // CHECK24-NEXT:    [[DOTOMP_LB102:%.*]] = alloca i32, align 4
31556 // CHECK24-NEXT:    [[DOTOMP_UB103:%.*]] = alloca i32, align 4
31557 // CHECK24-NEXT:    [[I104:%.*]] = alloca i32, align 4
31558 // CHECK24-NEXT:    [[DOTOMP_IV107:%.*]] = alloca i32, align 4
31559 // CHECK24-NEXT:    [[I108:%.*]] = alloca i32, align 4
31560 // CHECK24-NEXT:    store i32 0, i32* [[RETVAL]], align 4
31561 // CHECK24-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
31562 // CHECK24-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
31563 // CHECK24-NEXT:    store i32 100, i32* [[N]], align 4
31564 // CHECK24-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
31565 // CHECK24-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
31566 // CHECK24-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
31567 // CHECK24-NEXT:    [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
31568 // CHECK24-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
31569 // CHECK24-NEXT:    store i32 10, i32* [[M]], align 4
31570 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N]], align 4
31571 // CHECK24-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
31572 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
31573 // CHECK24-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
31574 // CHECK24-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
31575 // CHECK24-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
31576 // CHECK24-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
31577 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
31578 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
31579 // CHECK24-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
31580 // CHECK24-NEXT:    store i32 0, i32* [[I]], align 4
31581 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
31582 // CHECK24-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
31583 // CHECK24-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
31584 // CHECK24:       simd.if.then:
31585 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
31586 // CHECK24-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
31587 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
31588 // CHECK24:       omp.inner.for.cond:
31589 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31590 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
31591 // CHECK24-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
31592 // CHECK24-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
31593 // CHECK24:       omp.inner.for.body:
31594 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31595 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
31596 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
31597 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
31598 // CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
31599 // CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP10]]
31600 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3
31601 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
31602 // CHECK24:       omp.body.continue:
31603 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
31604 // CHECK24:       omp.inner.for.inc:
31605 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31606 // CHECK24-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
31607 // CHECK24-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
31608 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
31609 // CHECK24:       omp.inner.for.end:
31610 // CHECK24-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
31611 // CHECK24-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0
31612 // CHECK24-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
31613 // CHECK24-NEXT:    [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
31614 // CHECK24-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
31615 // CHECK24-NEXT:    store i32 [[ADD9]], i32* [[I3]], align 4
31616 // CHECK24-NEXT:    br label [[SIMD_IF_END]]
31617 // CHECK24:       simd.if.end:
31618 // CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[N]], align 4
31619 // CHECK24-NEXT:    store i32 [[TMP13]], i32* [[DOTCAPTURE_EXPR_11]], align 4
31620 // CHECK24-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
31621 // CHECK24-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP14]], 0
31622 // CHECK24-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
31623 // CHECK24-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
31624 // CHECK24-NEXT:    store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4
31625 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB16]], align 4
31626 // CHECK24-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4
31627 // CHECK24-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_UB17]], align 4
31628 // CHECK24-NEXT:    store i32 0, i32* [[I18]], align 4
31629 // CHECK24-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
31630 // CHECK24-NEXT:    [[CMP19:%.*]] = icmp slt i32 0, [[TMP16]]
31631 // CHECK24-NEXT:    br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END37:%.*]]
31632 // CHECK24:       simd.if.then20:
31633 // CHECK24-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4
31634 // CHECK24-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV21]], align 4
31635 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND23:%.*]]
31636 // CHECK24:       omp.inner.for.cond23:
31637 // CHECK24-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31638 // CHECK24-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !7
31639 // CHECK24-NEXT:    [[CMP24:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
31640 // CHECK24-NEXT:    br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END32:%.*]]
31641 // CHECK24:       omp.inner.for.body25:
31642 // CHECK24-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31643 // CHECK24-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[TMP20]], 1
31644 // CHECK24-NEXT:    [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
31645 // CHECK24-NEXT:    store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !7
31646 // CHECK24-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !7
31647 // CHECK24-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP21]]
31648 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX28]], align 4, !llvm.access.group !7
31649 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE29:%.*]]
31650 // CHECK24:       omp.body.continue29:
31651 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC30:%.*]]
31652 // CHECK24:       omp.inner.for.inc30:
31653 // CHECK24-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31654 // CHECK24-NEXT:    [[ADD31:%.*]] = add nsw i32 [[TMP22]], 1
31655 // CHECK24-NEXT:    store i32 [[ADD31]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7
31656 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP8:![0-9]+]]
31657 // CHECK24:       omp.inner.for.end32:
31658 // CHECK24-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
31659 // CHECK24-NEXT:    [[SUB33:%.*]] = sub nsw i32 [[TMP23]], 0
31660 // CHECK24-NEXT:    [[DIV34:%.*]] = sdiv i32 [[SUB33]], 1
31661 // CHECK24-NEXT:    [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 1
31662 // CHECK24-NEXT:    [[ADD36:%.*]] = add nsw i32 0, [[MUL35]]
31663 // CHECK24-NEXT:    store i32 [[ADD36]], i32* [[I22]], align 4
31664 // CHECK24-NEXT:    br label [[SIMD_IF_END37]]
31665 // CHECK24:       simd.if.end37:
31666 // CHECK24-NEXT:    [[TMP24:%.*]] = load i32, i32* [[M]], align 4
31667 // CHECK24-NEXT:    store i32 [[TMP24]], i32* [[DOTCAPTURE_EXPR_38]], align 4
31668 // CHECK24-NEXT:    [[TMP25:%.*]] = load i32, i32* [[N]], align 4
31669 // CHECK24-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_40]], align 4
31670 // CHECK24-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
31671 // CHECK24-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[TMP26]], 0
31672 // CHECK24-NEXT:    [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1
31673 // CHECK24-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1
31674 // CHECK24-NEXT:    store i32 [[SUB44]], i32* [[DOTCAPTURE_EXPR_41]], align 4
31675 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB45]], align 4
31676 // CHECK24-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
31677 // CHECK24-NEXT:    store i32 [[TMP27]], i32* [[DOTOMP_UB46]], align 4
31678 // CHECK24-NEXT:    store i32 0, i32* [[I47]], align 4
31679 // CHECK24-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
31680 // CHECK24-NEXT:    [[CMP48:%.*]] = icmp slt i32 0, [[TMP28]]
31681 // CHECK24-NEXT:    br i1 [[CMP48]], label [[SIMD_IF_THEN49:%.*]], label [[SIMD_IF_END66:%.*]]
31682 // CHECK24:       simd.if.then49:
31683 // CHECK24-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_LB45]], align 4
31684 // CHECK24-NEXT:    store i32 [[TMP29]], i32* [[DOTOMP_IV50]], align 4
31685 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND52:%.*]]
31686 // CHECK24:       omp.inner.for.cond52:
31687 // CHECK24-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31688 // CHECK24-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_UB46]], align 4, !llvm.access.group !10
31689 // CHECK24-NEXT:    [[CMP53:%.*]] = icmp sle i32 [[TMP30]], [[TMP31]]
31690 // CHECK24-NEXT:    br i1 [[CMP53]], label [[OMP_INNER_FOR_BODY54:%.*]], label [[OMP_INNER_FOR_END61:%.*]]
31691 // CHECK24:       omp.inner.for.body54:
31692 // CHECK24-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31693 // CHECK24-NEXT:    [[MUL55:%.*]] = mul nsw i32 [[TMP32]], 1
31694 // CHECK24-NEXT:    [[ADD56:%.*]] = add nsw i32 0, [[MUL55]]
31695 // CHECK24-NEXT:    store i32 [[ADD56]], i32* [[I51]], align 4, !llvm.access.group !10
31696 // CHECK24-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I51]], align 4, !llvm.access.group !10
31697 // CHECK24-NEXT:    [[ARRAYIDX57:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP33]]
31698 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX57]], align 4, !llvm.access.group !10
31699 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE58:%.*]]
31700 // CHECK24:       omp.body.continue58:
31701 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC59:%.*]]
31702 // CHECK24:       omp.inner.for.inc59:
31703 // CHECK24-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31704 // CHECK24-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP34]], 1
31705 // CHECK24-NEXT:    store i32 [[ADD60]], i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10
31706 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND52]], !llvm.loop [[LOOP11:![0-9]+]]
31707 // CHECK24:       omp.inner.for.end61:
31708 // CHECK24-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
31709 // CHECK24-NEXT:    [[SUB62:%.*]] = sub nsw i32 [[TMP35]], 0
31710 // CHECK24-NEXT:    [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
31711 // CHECK24-NEXT:    [[MUL64:%.*]] = mul nsw i32 [[DIV63]], 1
31712 // CHECK24-NEXT:    [[ADD65:%.*]] = add nsw i32 0, [[MUL64]]
31713 // CHECK24-NEXT:    store i32 [[ADD65]], i32* [[I51]], align 4
31714 // CHECK24-NEXT:    br label [[SIMD_IF_END66]]
31715 // CHECK24:       simd.if.end66:
31716 // CHECK24-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
31717 // CHECK24-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_68]], align 4
31718 // CHECK24-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
31719 // CHECK24-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP37]], 0
31720 // CHECK24-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
31721 // CHECK24-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
31722 // CHECK24-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
31723 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB73]], align 4
31724 // CHECK24-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
31725 // CHECK24-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB74]], align 4
31726 // CHECK24-NEXT:    store i32 0, i32* [[I75]], align 4
31727 // CHECK24-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
31728 // CHECK24-NEXT:    [[CMP76:%.*]] = icmp slt i32 0, [[TMP39]]
31729 // CHECK24-NEXT:    br i1 [[CMP76]], label [[SIMD_IF_THEN77:%.*]], label [[SIMD_IF_END94:%.*]]
31730 // CHECK24:       simd.if.then77:
31731 // CHECK24-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB73]], align 4
31732 // CHECK24-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV78]], align 4
31733 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND80:%.*]]
31734 // CHECK24:       omp.inner.for.cond80:
31735 // CHECK24-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31736 // CHECK24-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB74]], align 4, !llvm.access.group !13
31737 // CHECK24-NEXT:    [[CMP81:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
31738 // CHECK24-NEXT:    br i1 [[CMP81]], label [[OMP_INNER_FOR_BODY82:%.*]], label [[OMP_INNER_FOR_END89:%.*]]
31739 // CHECK24:       omp.inner.for.body82:
31740 // CHECK24-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31741 // CHECK24-NEXT:    [[MUL83:%.*]] = mul nsw i32 [[TMP43]], 1
31742 // CHECK24-NEXT:    [[ADD84:%.*]] = add nsw i32 0, [[MUL83]]
31743 // CHECK24-NEXT:    store i32 [[ADD84]], i32* [[I79]], align 4, !llvm.access.group !13
31744 // CHECK24-NEXT:    [[TMP44:%.*]] = load i32, i32* [[I79]], align 4, !llvm.access.group !13
31745 // CHECK24-NEXT:    [[ARRAYIDX85:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP44]]
31746 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX85]], align 4, !llvm.access.group !13
31747 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE86:%.*]]
31748 // CHECK24:       omp.body.continue86:
31749 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC87:%.*]]
31750 // CHECK24:       omp.inner.for.inc87:
31751 // CHECK24-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31752 // CHECK24-NEXT:    [[ADD88:%.*]] = add nsw i32 [[TMP45]], 1
31753 // CHECK24-NEXT:    store i32 [[ADD88]], i32* [[DOTOMP_IV78]], align 4, !llvm.access.group !13
31754 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND80]], !llvm.loop [[LOOP14:![0-9]+]]
31755 // CHECK24:       omp.inner.for.end89:
31756 // CHECK24-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
31757 // CHECK24-NEXT:    [[SUB90:%.*]] = sub nsw i32 [[TMP46]], 0
31758 // CHECK24-NEXT:    [[DIV91:%.*]] = sdiv i32 [[SUB90]], 1
31759 // CHECK24-NEXT:    [[MUL92:%.*]] = mul nsw i32 [[DIV91]], 1
31760 // CHECK24-NEXT:    [[ADD93:%.*]] = add nsw i32 0, [[MUL92]]
31761 // CHECK24-NEXT:    store i32 [[ADD93]], i32* [[I79]], align 4
31762 // CHECK24-NEXT:    br label [[SIMD_IF_END94]]
31763 // CHECK24:       simd.if.end94:
31764 // CHECK24-NEXT:    [[TMP47:%.*]] = load i32, i32* [[M]], align 4
31765 // CHECK24-NEXT:    store i32 [[TMP47]], i32* [[DOTCAPTURE_EXPR_95]], align 4
31766 // CHECK24-NEXT:    [[TMP48:%.*]] = load i32, i32* [[N]], align 4
31767 // CHECK24-NEXT:    store i32 [[TMP48]], i32* [[DOTCAPTURE_EXPR_97]], align 4
31768 // CHECK24-NEXT:    [[TMP49:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_97]], align 4
31769 // CHECK24-NEXT:    [[SUB99:%.*]] = sub nsw i32 [[TMP49]], 0
31770 // CHECK24-NEXT:    [[DIV100:%.*]] = sdiv i32 [[SUB99]], 1
31771 // CHECK24-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[DIV100]], 1
31772 // CHECK24-NEXT:    store i32 [[SUB101]], i32* [[DOTCAPTURE_EXPR_98]], align 4
31773 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB102]], align 4
31774 // CHECK24-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_98]], align 4
31775 // CHECK24-NEXT:    store i32 [[TMP50]], i32* [[DOTOMP_UB103]], align 4
31776 // CHECK24-NEXT:    store i32 0, i32* [[I104]], align 4
31777 // CHECK24-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_97]], align 4
31778 // CHECK24-NEXT:    [[CMP105:%.*]] = icmp slt i32 0, [[TMP51]]
31779 // CHECK24-NEXT:    br i1 [[CMP105]], label [[SIMD_IF_THEN106:%.*]], label [[SIMD_IF_END123:%.*]]
31780 // CHECK24:       simd.if.then106:
31781 // CHECK24-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_LB102]], align 4
31782 // CHECK24-NEXT:    store i32 [[TMP52]], i32* [[DOTOMP_IV107]], align 4
31783 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND109:%.*]]
31784 // CHECK24:       omp.inner.for.cond109:
31785 // CHECK24-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31786 // CHECK24-NEXT:    [[TMP54:%.*]] = load i32, i32* [[DOTOMP_UB103]], align 4, !llvm.access.group !16
31787 // CHECK24-NEXT:    [[CMP110:%.*]] = icmp sle i32 [[TMP53]], [[TMP54]]
31788 // CHECK24-NEXT:    br i1 [[CMP110]], label [[OMP_INNER_FOR_BODY111:%.*]], label [[OMP_INNER_FOR_END118:%.*]]
31789 // CHECK24:       omp.inner.for.body111:
31790 // CHECK24-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31791 // CHECK24-NEXT:    [[MUL112:%.*]] = mul nsw i32 [[TMP55]], 1
31792 // CHECK24-NEXT:    [[ADD113:%.*]] = add nsw i32 0, [[MUL112]]
31793 // CHECK24-NEXT:    store i32 [[ADD113]], i32* [[I108]], align 4, !llvm.access.group !16
31794 // CHECK24-NEXT:    [[TMP56:%.*]] = load i32, i32* [[I108]], align 4, !llvm.access.group !16
31795 // CHECK24-NEXT:    [[ARRAYIDX114:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP56]]
31796 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX114]], align 4, !llvm.access.group !16
31797 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE115:%.*]]
31798 // CHECK24:       omp.body.continue115:
31799 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC116:%.*]]
31800 // CHECK24:       omp.inner.for.inc116:
31801 // CHECK24-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31802 // CHECK24-NEXT:    [[ADD117:%.*]] = add nsw i32 [[TMP57]], 1
31803 // CHECK24-NEXT:    store i32 [[ADD117]], i32* [[DOTOMP_IV107]], align 4, !llvm.access.group !16
31804 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND109]], !llvm.loop [[LOOP17:![0-9]+]]
31805 // CHECK24:       omp.inner.for.end118:
31806 // CHECK24-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_97]], align 4
31807 // CHECK24-NEXT:    [[SUB119:%.*]] = sub nsw i32 [[TMP58]], 0
31808 // CHECK24-NEXT:    [[DIV120:%.*]] = sdiv i32 [[SUB119]], 1
31809 // CHECK24-NEXT:    [[MUL121:%.*]] = mul nsw i32 [[DIV120]], 1
31810 // CHECK24-NEXT:    [[ADD122:%.*]] = add nsw i32 0, [[MUL121]]
31811 // CHECK24-NEXT:    store i32 [[ADD122]], i32* [[I108]], align 4
31812 // CHECK24-NEXT:    br label [[SIMD_IF_END123]]
31813 // CHECK24:       simd.if.end123:
31814 // CHECK24-NEXT:    [[TMP59:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
31815 // CHECK24-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP59]])
31816 // CHECK24-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
31817 // CHECK24-NEXT:    [[TMP60:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
31818 // CHECK24-NEXT:    call void @llvm.stackrestore(i8* [[TMP60]])
31819 // CHECK24-NEXT:    [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4
31820 // CHECK24-NEXT:    ret i32 [[TMP61]]
31821 //
31822 //
31823 // CHECK24-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
31824 // CHECK24-SAME: (i32 [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
31825 // CHECK24-NEXT:  entry:
31826 // CHECK24-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
31827 // CHECK24-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
31828 // CHECK24-NEXT:    [[M:%.*]] = alloca i32, align 4
31829 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
31830 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
31831 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
31832 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
31833 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
31834 // CHECK24-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
31835 // CHECK24-NEXT:    [[DOTOMP_LB3:%.*]] = alloca i32, align 4
31836 // CHECK24-NEXT:    [[DOTOMP_UB4:%.*]] = alloca i32, align 4
31837 // CHECK24-NEXT:    [[DOTOMP_IV5:%.*]] = alloca i32, align 4
31838 // CHECK24-NEXT:    [[I6:%.*]] = alloca i32, align 4
31839 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
31840 // CHECK24-NEXT:    [[_TMP17:%.*]] = alloca i32, align 4
31841 // CHECK24-NEXT:    [[DOTOMP_LB18:%.*]] = alloca i32, align 4
31842 // CHECK24-NEXT:    [[DOTOMP_UB19:%.*]] = alloca i32, align 4
31843 // CHECK24-NEXT:    [[DOTOMP_IV20:%.*]] = alloca i32, align 4
31844 // CHECK24-NEXT:    [[I21:%.*]] = alloca i32, align 4
31845 // CHECK24-NEXT:    [[_TMP32:%.*]] = alloca i32, align 4
31846 // CHECK24-NEXT:    [[DOTOMP_LB33:%.*]] = alloca i32, align 4
31847 // CHECK24-NEXT:    [[DOTOMP_UB34:%.*]] = alloca i32, align 4
31848 // CHECK24-NEXT:    [[DOTOMP_IV35:%.*]] = alloca i32, align 4
31849 // CHECK24-NEXT:    [[I36:%.*]] = alloca i32, align 4
31850 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4
31851 // CHECK24-NEXT:    [[_TMP48:%.*]] = alloca i32, align 4
31852 // CHECK24-NEXT:    [[DOTOMP_LB49:%.*]] = alloca i32, align 4
31853 // CHECK24-NEXT:    [[DOTOMP_UB50:%.*]] = alloca i32, align 4
31854 // CHECK24-NEXT:    [[DOTOMP_IV51:%.*]] = alloca i32, align 4
31855 // CHECK24-NEXT:    [[I52:%.*]] = alloca i32, align 4
31856 // CHECK24-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
31857 // CHECK24-NEXT:    store i32 10, i32* [[M]], align 4
31858 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
31859 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
31860 // CHECK24-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
31861 // CHECK24-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
31862 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
31863 // CHECK24:       omp.inner.for.cond:
31864 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31865 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
31866 // CHECK24-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
31867 // CHECK24-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
31868 // CHECK24:       omp.inner.for.body:
31869 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31870 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
31871 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
31872 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19
31873 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !19
31874 // CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP4]]
31875 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19
31876 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
31877 // CHECK24:       omp.body.continue:
31878 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
31879 // CHECK24:       omp.inner.for.inc:
31880 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31881 // CHECK24-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
31882 // CHECK24-NEXT:    store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
31883 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
31884 // CHECK24:       omp.inner.for.end:
31885 // CHECK24-NEXT:    store i32 10, i32* [[I]], align 4
31886 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB3]], align 4
31887 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB4]], align 4
31888 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4
31889 // CHECK24-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4
31890 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND7:%.*]]
31891 // CHECK24:       omp.inner.for.cond7:
31892 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31893 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !22
31894 // CHECK24-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
31895 // CHECK24-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
31896 // CHECK24:       omp.inner.for.body9:
31897 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31898 // CHECK24-NEXT:    [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
31899 // CHECK24-NEXT:    [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
31900 // CHECK24-NEXT:    store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !22
31901 // CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !22
31902 // CHECK24-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP10]]
31903 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX12]], align 4, !llvm.access.group !22
31904 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE13:%.*]]
31905 // CHECK24:       omp.body.continue13:
31906 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC14:%.*]]
31907 // CHECK24:       omp.inner.for.inc14:
31908 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31909 // CHECK24-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP11]], 1
31910 // CHECK24-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !22
31911 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]]
31912 // CHECK24:       omp.inner.for.end16:
31913 // CHECK24-NEXT:    store i32 10, i32* [[I6]], align 4
31914 // CHECK24-NEXT:    [[TMP12:%.*]] = load i32, i32* [[M]], align 4
31915 // CHECK24-NEXT:    store i32 [[TMP12]], i32* [[DOTCAPTURE_EXPR_]], align 4
31916 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB18]], align 4
31917 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB19]], align 4
31918 // CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB18]], align 4
31919 // CHECK24-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV20]], align 4
31920 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND22:%.*]]
31921 // CHECK24:       omp.inner.for.cond22:
31922 // CHECK24-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31923 // CHECK24-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB19]], align 4, !llvm.access.group !25
31924 // CHECK24-NEXT:    [[CMP23:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
31925 // CHECK24-NEXT:    br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
31926 // CHECK24:       omp.inner.for.body24:
31927 // CHECK24-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31928 // CHECK24-NEXT:    [[MUL25:%.*]] = mul nsw i32 [[TMP16]], 1
31929 // CHECK24-NEXT:    [[ADD26:%.*]] = add nsw i32 0, [[MUL25]]
31930 // CHECK24-NEXT:    store i32 [[ADD26]], i32* [[I21]], align 4, !llvm.access.group !25
31931 // CHECK24-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I21]], align 4, !llvm.access.group !25
31932 // CHECK24-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP17]]
31933 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX27]], align 4, !llvm.access.group !25
31934 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE28:%.*]]
31935 // CHECK24:       omp.body.continue28:
31936 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC29:%.*]]
31937 // CHECK24:       omp.inner.for.inc29:
31938 // CHECK24-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31939 // CHECK24-NEXT:    [[ADD30:%.*]] = add nsw i32 [[TMP18]], 1
31940 // CHECK24-NEXT:    store i32 [[ADD30]], i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !25
31941 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP26:![0-9]+]]
31942 // CHECK24:       omp.inner.for.end31:
31943 // CHECK24-NEXT:    store i32 10, i32* [[I21]], align 4
31944 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB33]], align 4
31945 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB34]], align 4
31946 // CHECK24-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB33]], align 4
31947 // CHECK24-NEXT:    store i32 [[TMP19]], i32* [[DOTOMP_IV35]], align 4
31948 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND37:%.*]]
31949 // CHECK24:       omp.inner.for.cond37:
31950 // CHECK24-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31951 // CHECK24-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB34]], align 4, !llvm.access.group !28
31952 // CHECK24-NEXT:    [[CMP38:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
31953 // CHECK24-NEXT:    br i1 [[CMP38]], label [[OMP_INNER_FOR_BODY39:%.*]], label [[OMP_INNER_FOR_END46:%.*]]
31954 // CHECK24:       omp.inner.for.body39:
31955 // CHECK24-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31956 // CHECK24-NEXT:    [[MUL40:%.*]] = mul nsw i32 [[TMP22]], 1
31957 // CHECK24-NEXT:    [[ADD41:%.*]] = add nsw i32 0, [[MUL40]]
31958 // CHECK24-NEXT:    store i32 [[ADD41]], i32* [[I36]], align 4, !llvm.access.group !28
31959 // CHECK24-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I36]], align 4, !llvm.access.group !28
31960 // CHECK24-NEXT:    [[ARRAYIDX42:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP23]]
31961 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX42]], align 4, !llvm.access.group !28
31962 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE43:%.*]]
31963 // CHECK24:       omp.body.continue43:
31964 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC44:%.*]]
31965 // CHECK24:       omp.inner.for.inc44:
31966 // CHECK24-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31967 // CHECK24-NEXT:    [[ADD45:%.*]] = add nsw i32 [[TMP24]], 1
31968 // CHECK24-NEXT:    store i32 [[ADD45]], i32* [[DOTOMP_IV35]], align 4, !llvm.access.group !28
31969 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND37]], !llvm.loop [[LOOP29:![0-9]+]]
31970 // CHECK24:       omp.inner.for.end46:
31971 // CHECK24-NEXT:    store i32 10, i32* [[I36]], align 4
31972 // CHECK24-NEXT:    [[TMP25:%.*]] = load i32, i32* [[M]], align 4
31973 // CHECK24-NEXT:    store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_47]], align 4
31974 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB49]], align 4
31975 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB50]], align 4
31976 // CHECK24-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB49]], align 4
31977 // CHECK24-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV51]], align 4
31978 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND53:%.*]]
31979 // CHECK24:       omp.inner.for.cond53:
31980 // CHECK24-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31981 // CHECK24-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB50]], align 4, !llvm.access.group !31
31982 // CHECK24-NEXT:    [[CMP54:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
31983 // CHECK24-NEXT:    br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END62:%.*]]
31984 // CHECK24:       omp.inner.for.body55:
31985 // CHECK24-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31986 // CHECK24-NEXT:    [[MUL56:%.*]] = mul nsw i32 [[TMP29]], 1
31987 // CHECK24-NEXT:    [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
31988 // CHECK24-NEXT:    store i32 [[ADD57]], i32* [[I52]], align 4, !llvm.access.group !31
31989 // CHECK24-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I52]], align 4, !llvm.access.group !31
31990 // CHECK24-NEXT:    [[ARRAYIDX58:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP30]]
31991 // CHECK24-NEXT:    store i32 0, i32* [[ARRAYIDX58]], align 4, !llvm.access.group !31
31992 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE59:%.*]]
31993 // CHECK24:       omp.body.continue59:
31994 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC60:%.*]]
31995 // CHECK24:       omp.inner.for.inc60:
31996 // CHECK24-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31997 // CHECK24-NEXT:    [[ADD61:%.*]] = add nsw i32 [[TMP31]], 1
31998 // CHECK24-NEXT:    store i32 [[ADD61]], i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !31
31999 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP32:![0-9]+]]
32000 // CHECK24:       omp.inner.for.end62:
32001 // CHECK24-NEXT:    store i32 10, i32* [[I52]], align 4
32002 // CHECK24-NEXT:    ret i32 0
32003 //
32004