// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -fopenmp-optimistic-collapse -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK4

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

#define N 1000
#define M 10

17 template<typename tx>
ftemplate(int n)18 tx ftemplate(int n) {
19   tx a[N];
20   short aa[N];
21   tx b[10];
22   tx c[M][M];
23   tx f = n;
24   tx l;
25   int k;
26   tx *v;
27 
28 #pragma omp target teams distribute parallel for lastprivate(l) dist_schedule(static,128) schedule(static,32)
29   for(int i = 0; i < n; i++) {
30     a[i] = 1;
31     l = i;
32   }
33 
34 #pragma omp target teams distribute parallel for map(tofrom: aa) num_teams(M) thread_limit(64)
35   for(int i = 0; i < n; i++) {
36     aa[i] += 1;
37   }
38 
39 #pragma omp target teams distribute parallel for map(tofrom:a, aa, b) if(target: n>40) proc_bind(spread)
40   for(int i = 0; i < 10; i++) {
41     b[i] += 1;
42   }
43 
44 #pragma omp target teams distribute parallel for collapse(2) firstprivate(f) private(k)
45   for(int i = 0; i < M; i++) {
46     for(int j = 0; j < M; j++) {
47       k = M;
48       c[i][j] = i + j * f + k;
49     }
50   }
51 
52 #pragma omp target teams distribute parallel for collapse(2)
53   for(int i = 0; i < n; i++) {
54     for(int j = 0; j < n; j++) {
55       c[i][j] = i + j;
56     }
57   }
58 
59 #pragma omp target teams distribute parallel for map(a, v[:N])
60   for(int i = 0; i < n; i++)
61     a[i] = v[i];
62   return a[0];
63 }
bar(int n)65 int bar(int n){
66   int a = 0;
67 
68   a += ftemplate<int>(n);
69 
70   return a;
71 }

#endif
74 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
75 // CHECK5-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
76 // CHECK5-NEXT:  entry:
77 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
78 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
79 // CHECK5-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
80 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
81 // CHECK5-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
82 // CHECK5-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
83 // CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
84 // CHECK5-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
85 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
86 // CHECK5-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
87 // CHECK5-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
88 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
89 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
90 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
91 // CHECK5-NEXT:    br label [[DOTEXECUTE:%.*]]
92 // CHECK5:       .execute:
93 // CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
94 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
95 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
96 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
97 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
98 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
99 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
100 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
101 // CHECK5-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
102 // CHECK5-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
103 // CHECK5:       .omp.deinit:
104 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
105 // CHECK5-NEXT:    br label [[DOTEXIT:%.*]]
106 // CHECK5:       .exit:
107 // CHECK5-NEXT:    ret void
108 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__
109 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
110 // CHECK5-NEXT:  entry:
111 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
112 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
113 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
114 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
115 // CHECK5-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
116 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
117 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
118 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
119 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
120 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
121 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
122 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
123 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
124 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
125 // CHECK5-NEXT:    [[I4:%.*]] = alloca i32, align 4
126 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
127 // CHECK5-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
128 // CHECK5-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
129 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
130 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
131 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
132 // CHECK5-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
133 // CHECK5-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
134 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
135 // CHECK5-NEXT:    [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
136 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* @"_openmp_static_kernel$size", align 4
137 // CHECK5-NEXT:    call void @__kmpc_get_team_static_memory(i16 1, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i32 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
138 // CHECK5-NEXT:    [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 4
139 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i32 0
140 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
141 // CHECK5-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
142 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
143 // CHECK5-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
144 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
145 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
146 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
147 // CHECK5-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
148 // CHECK5-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
149 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
150 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
151 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
152 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
153 // CHECK5:       omp.precond.then:
154 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
155 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
156 // CHECK5-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
157 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
158 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
159 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
160 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
161 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
162 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
163 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
164 // CHECK5-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
165 // CHECK5-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
166 // CHECK5:       cond.true:
167 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
168 // CHECK5-NEXT:    br label [[COND_END:%.*]]
169 // CHECK5:       cond.false:
170 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
171 // CHECK5-NEXT:    br label [[COND_END]]
172 // CHECK5:       cond.end:
173 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
174 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
175 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
176 // CHECK5-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
177 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
178 // CHECK5:       omp.inner.for.cond:
179 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
180 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
181 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
182 // CHECK5-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
183 // CHECK5-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
184 // CHECK5:       omp.inner.for.body:
185 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
186 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
187 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
188 // CHECK5-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
189 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
190 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[L_ADDR]], align 4
191 // CHECK5-NEXT:    store i32 [[TMP23]], i32* [[L_CASTED]], align 4
192 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[L_CASTED]], align 4
193 // CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
194 // CHECK5-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP19]] to i8*
195 // CHECK5-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
196 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
197 // CHECK5-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP20]] to i8*
198 // CHECK5-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
199 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
200 // CHECK5-NEXT:    [[TMP30:%.*]] = inttoptr i32 [[TMP22]] to i8*
201 // CHECK5-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
202 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
203 // CHECK5-NEXT:    [[TMP32:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
204 // CHECK5-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 4
205 // CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
206 // CHECK5-NEXT:    [[TMP34:%.*]] = inttoptr i32 [[TMP24]] to i8*
207 // CHECK5-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 4
208 // CHECK5-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
209 // CHECK5-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
210 // CHECK5-NEXT:    [[TMP37:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
211 // CHECK5-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP37]], i32 5)
212 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
213 // CHECK5:       omp.inner.for.inc:
214 // CHECK5-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
215 // CHECK5-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
216 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
217 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
218 // CHECK5-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
219 // CHECK5-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
220 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
221 // CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
222 // CHECK5-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
223 // CHECK5-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
224 // CHECK5-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
225 // CHECK5-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
226 // CHECK5-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
227 // CHECK5-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
228 // CHECK5-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP44]], [[TMP45]]
229 // CHECK5-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
230 // CHECK5:       cond.true11:
231 // CHECK5-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
232 // CHECK5-NEXT:    br label [[COND_END13:%.*]]
233 // CHECK5:       cond.false12:
234 // CHECK5-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
235 // CHECK5-NEXT:    br label [[COND_END13]]
236 // CHECK5:       cond.end13:
237 // CHECK5-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP46]], [[COND_TRUE11]] ], [ [[TMP47]], [[COND_FALSE12]] ]
238 // CHECK5-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
239 // CHECK5-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
240 // CHECK5-NEXT:    store i32 [[TMP48]], i32* [[DOTOMP_IV]], align 4
241 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
242 // CHECK5:       omp.inner.for.end:
243 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
244 // CHECK5:       omp.loop.exit:
245 // CHECK5-NEXT:    [[TMP49:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
246 // CHECK5-NEXT:    [[TMP50:%.*]] = load i32, i32* [[TMP49]], align 4
247 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP50]])
248 // CHECK5-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
249 // CHECK5-NEXT:    [[TMP52:%.*]] = icmp ne i32 [[TMP51]], 0
250 // CHECK5-NEXT:    br i1 [[TMP52]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
251 // CHECK5:       .omp.lastprivate.then:
252 // CHECK5-NEXT:    [[TMP53:%.*]] = load i32, i32* [[L_ADDR]], align 4
253 // CHECK5-NEXT:    store i32 [[TMP53]], i32* [[L_ADDR]], align 4
254 // CHECK5-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
255 // CHECK5:       .omp.lastprivate.done:
256 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
257 // CHECK5:       omp.precond.end:
258 // CHECK5-NEXT:    [[TMP54:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
259 // CHECK5-NEXT:    call void @__kmpc_restore_team_static_memory(i16 1, i16 [[TMP54]])
260 // CHECK5-NEXT:    ret void
261 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__1
262 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
263 // CHECK5-NEXT:  entry:
264 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
265 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
266 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
267 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
268 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
269 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
270 // CHECK5-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
271 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
272 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
273 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
274 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
275 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
276 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
277 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
278 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
279 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
280 // CHECK5-NEXT:    [[I3:%.*]] = alloca i32, align 4
281 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
282 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
283 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
284 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
285 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
286 // CHECK5-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
287 // CHECK5-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
288 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
289 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
290 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
291 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
292 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
293 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
294 // CHECK5-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
295 // CHECK5-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
296 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
297 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
298 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
299 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
300 // CHECK5:       omp.precond.then:
301 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
302 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
303 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
304 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
305 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
306 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
307 // CHECK5-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
308 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
309 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
310 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
311 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
312 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
313 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
314 // CHECK5:       omp.dispatch.cond:
315 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
316 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
317 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
318 // CHECK5-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
319 // CHECK5:       cond.true:
320 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
321 // CHECK5-NEXT:    br label [[COND_END:%.*]]
322 // CHECK5:       cond.false:
323 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
324 // CHECK5-NEXT:    br label [[COND_END]]
325 // CHECK5:       cond.end:
326 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
327 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
328 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
329 // CHECK5-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
330 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
331 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
332 // CHECK5-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
333 // CHECK5-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
334 // CHECK5:       omp.dispatch.body:
335 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
336 // CHECK5:       omp.inner.for.cond:
337 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
338 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
339 // CHECK5-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
340 // CHECK5-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
341 // CHECK5:       omp.inner.for.body:
342 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
343 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
344 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
345 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
346 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
347 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
348 // CHECK5-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
349 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
350 // CHECK5-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
351 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
352 // CHECK5:       omp.body.continue:
353 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
354 // CHECK5:       omp.inner.for.inc:
355 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
356 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
357 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
358 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
359 // CHECK5:       omp.inner.for.end:
360 // CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
361 // CHECK5:       omp.dispatch.inc:
362 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
363 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
364 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
365 // CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
366 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
367 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
368 // CHECK5-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
369 // CHECK5-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
370 // CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
371 // CHECK5:       omp.dispatch.end:
372 // CHECK5-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
373 // CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
374 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
375 // CHECK5-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
376 // CHECK5-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
377 // CHECK5-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
378 // CHECK5:       .omp.lastprivate.then:
379 // CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
380 // CHECK5-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
381 // CHECK5-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
382 // CHECK5:       .omp.lastprivate.done:
383 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
384 // CHECK5:       omp.precond.end:
385 // CHECK5-NEXT:    ret void
386 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
387 // CHECK5-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
388 // CHECK5-NEXT:  entry:
389 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
390 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
391 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
392 // CHECK5-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
393 // CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
394 // CHECK5-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
395 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
396 // CHECK5-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
397 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
398 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
399 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
400 // CHECK5-NEXT:    br label [[DOTEXECUTE:%.*]]
401 // CHECK5:       .execute:
402 // CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
403 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
404 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
405 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
406 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
407 // CHECK5-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
408 // CHECK5-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
409 // CHECK5:       .omp.deinit:
410 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
411 // CHECK5-NEXT:    br label [[DOTEXIT:%.*]]
412 // CHECK5:       .exit:
413 // CHECK5-NEXT:    ret void
414 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__2
415 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
416 // CHECK5-NEXT:  entry:
417 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
418 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
419 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
420 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
421 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
422 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
423 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
424 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
425 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
426 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
427 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
428 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
429 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
430 // CHECK5-NEXT:    [[I3:%.*]] = alloca i32, align 4
431 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
432 // CHECK5-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
433 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
434 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
435 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
436 // CHECK5-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
437 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
438 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
439 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
440 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
441 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
442 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
443 // CHECK5-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
444 // CHECK5-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
445 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
446 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
447 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
448 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
449 // CHECK5:       omp.precond.then:
450 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
451 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
452 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
453 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
454 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
455 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
456 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
457 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
458 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
459 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
460 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
461 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
462 // CHECK5-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
463 // CHECK5:       cond.true:
464 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
465 // CHECK5-NEXT:    br label [[COND_END:%.*]]
466 // CHECK5:       cond.false:
467 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
468 // CHECK5-NEXT:    br label [[COND_END]]
469 // CHECK5:       cond.end:
470 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
471 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
472 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
473 // CHECK5-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
474 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
475 // CHECK5:       omp.inner.for.cond:
476 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
477 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
478 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
479 // CHECK5-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
480 // CHECK5-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
481 // CHECK5:       omp.inner.for.body:
482 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
483 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
484 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
485 // CHECK5-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
486 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
487 // CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
488 // CHECK5-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
489 // CHECK5-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
490 // CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
491 // CHECK5-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
492 // CHECK5-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
493 // CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
494 // CHECK5-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
495 // CHECK5-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
496 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
497 // CHECK5-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
498 // CHECK5-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
499 // CHECK5-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
500 // CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
501 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
502 // CHECK5-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
503 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
504 // CHECK5:       omp.inner.for.inc:
505 // CHECK5-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
506 // CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
507 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
508 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
509 // CHECK5-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
510 // CHECK5-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
511 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
512 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
513 // CHECK5-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
514 // CHECK5-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
515 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
516 // CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
517 // CHECK5-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
518 // CHECK5-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
519 // CHECK5-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
520 // CHECK5-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
521 // CHECK5:       cond.true10:
522 // CHECK5-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
523 // CHECK5-NEXT:    br label [[COND_END12:%.*]]
524 // CHECK5:       cond.false11:
525 // CHECK5-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
526 // CHECK5-NEXT:    br label [[COND_END12]]
527 // CHECK5:       cond.end12:
528 // CHECK5-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
529 // CHECK5-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
530 // CHECK5-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
531 // CHECK5-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
532 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
533 // CHECK5:       omp.inner.for.end:
534 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
535 // CHECK5:       omp.loop.exit:
536 // CHECK5-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
537 // CHECK5-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
538 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
539 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
540 // CHECK5:       omp.precond.end:
541 // CHECK5-NEXT:    ret void
542 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__3
543 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
544 // CHECK5-NEXT:  entry:
545 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
546 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
547 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
548 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
549 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
550 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
551 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
552 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
553 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
554 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
555 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
556 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
557 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
558 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
559 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
560 // CHECK5-NEXT:    [[I3:%.*]] = alloca i32, align 4
561 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
562 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
563 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
564 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
565 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
566 // CHECK5-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
567 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
568 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
569 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
570 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
571 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
572 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
573 // CHECK5-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
574 // CHECK5-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
575 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
576 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
577 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
578 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
579 // CHECK5:       omp.precond.then:
580 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
581 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
582 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
583 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
584 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
585 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
586 // CHECK5-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
587 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
588 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
589 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
590 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
591 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
592 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
593 // CHECK5-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
594 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
595 // CHECK5:       omp.inner.for.cond:
596 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
597 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
598 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
599 // CHECK5-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
600 // CHECK5:       omp.inner.for.body:
601 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
602 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
603 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
604 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
605 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
606 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
607 // CHECK5-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
608 // CHECK5-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
609 // CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
610 // CHECK5-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
611 // CHECK5-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
612 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
613 // CHECK5:       omp.body.continue:
614 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
615 // CHECK5:       omp.inner.for.inc:
616 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
617 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
618 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
619 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
620 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
621 // CHECK5:       omp.inner.for.end:
622 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
623 // CHECK5:       omp.loop.exit:
624 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
625 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
626 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
627 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
628 // CHECK5:       omp.precond.end:
629 // CHECK5-NEXT:    ret void
630 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
631 // CHECK5-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
632 // CHECK5-NEXT:  entry:
633 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
634 // CHECK5-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
635 // CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
636 // CHECK5-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
637 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
638 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
639 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
640 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
641 // CHECK5-NEXT:    br label [[DOTEXECUTE:%.*]]
642 // CHECK5:       .execute:
643 // CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
644 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
645 // CHECK5-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
646 // CHECK5-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
647 // CHECK5:       .omp.deinit:
648 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
649 // CHECK5-NEXT:    br label [[DOTEXIT:%.*]]
650 // CHECK5:       .exit:
651 // CHECK5-NEXT:    ret void
652 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__4
653 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
654 // CHECK5-NEXT:  entry:
655 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
656 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
657 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
658 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
659 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
660 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
661 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
662 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
663 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
664 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
665 // CHECK5-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
666 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
667 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
668 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
669 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
670 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
671 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
672 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
673 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
674 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
675 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
676 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
677 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
678 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
679 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
680 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
681 // CHECK5:       cond.true:
682 // CHECK5-NEXT:    br label [[COND_END:%.*]]
683 // CHECK5:       cond.false:
684 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
685 // CHECK5-NEXT:    br label [[COND_END]]
686 // CHECK5:       cond.end:
687 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
688 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
689 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
690 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
691 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
692 // CHECK5:       omp.inner.for.cond:
693 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
694 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
695 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
696 // CHECK5:       omp.inner.for.body:
697 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
698 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
699 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
700 // CHECK5-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
701 // CHECK5-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
702 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
703 // CHECK5-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
704 // CHECK5-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
705 // CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
706 // CHECK5-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
707 // CHECK5-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
708 // CHECK5-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
709 // CHECK5-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
710 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
711 // CHECK5:       omp.inner.for.inc:
712 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
713 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
714 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
715 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
716 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
717 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
718 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
719 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
720 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
721 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
722 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
723 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
724 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
725 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
726 // CHECK5-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
727 // CHECK5:       cond.true5:
728 // CHECK5-NEXT:    br label [[COND_END7:%.*]]
729 // CHECK5:       cond.false6:
730 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
731 // CHECK5-NEXT:    br label [[COND_END7]]
732 // CHECK5:       cond.end7:
733 // CHECK5-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
734 // CHECK5-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
735 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
736 // CHECK5-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
737 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
738 // CHECK5:       omp.inner.for.end:
739 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
740 // CHECK5:       omp.loop.exit:
741 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
742 // CHECK5-NEXT:    ret void
743 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__5
744 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
745 // CHECK5-NEXT:  entry:
746 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
747 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
748 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
749 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
750 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
751 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
752 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
753 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
754 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
755 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
756 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
757 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
758 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
759 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
760 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
761 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
762 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
763 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
764 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
765 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
766 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
767 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
768 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
769 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
770 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
771 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
772 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
773 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
774 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
775 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
776 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
777 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
778 // CHECK5:       omp.inner.for.cond:
779 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
780 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
781 // CHECK5-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
782 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
783 // CHECK5:       omp.inner.for.body:
784 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
785 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
786 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
787 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
788 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
789 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
790 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
791 // CHECK5-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
792 // CHECK5-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
793 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
794 // CHECK5:       omp.body.continue:
795 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
796 // CHECK5:       omp.inner.for.inc:
797 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
798 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
799 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
800 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
801 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
802 // CHECK5:       omp.inner.for.end:
803 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
804 // CHECK5:       omp.loop.exit:
805 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
806 // CHECK5-NEXT:    ret void
807 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
808 // CHECK5-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
809 // CHECK5-NEXT:  entry:
810 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
811 // CHECK5-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
812 // CHECK5-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
813 // CHECK5-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
814 // CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
815 // CHECK5-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
816 // CHECK5-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
817 // CHECK5-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
818 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
819 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
820 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
821 // CHECK5-NEXT:    br label [[DOTEXECUTE:%.*]]
822 // CHECK5:       .execute:
823 // CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
824 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
825 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
826 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
827 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
828 // CHECK5-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
829 // CHECK5-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
830 // CHECK5:       .omp.deinit:
831 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
832 // CHECK5-NEXT:    br label [[DOTEXIT:%.*]]
833 // CHECK5:       .exit:
834 // CHECK5-NEXT:    ret void
835 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__6
836 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
837 // CHECK5-NEXT:  entry:
838 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
839 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
840 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
841 // CHECK5-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
842 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
843 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
844 // CHECK5-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
845 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
846 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
847 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
848 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
849 // CHECK5-NEXT:    [[K:%.*]] = alloca i32, align 4
850 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
851 // CHECK5-NEXT:    [[J:%.*]] = alloca i32, align 4
852 // CHECK5-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
853 // CHECK5-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
854 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
855 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
856 // CHECK5-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
857 // CHECK5-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
858 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
859 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
860 // CHECK5-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
861 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
862 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
863 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
864 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
865 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
866 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
867 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
868 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
869 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
870 // CHECK5:       cond.true:
871 // CHECK5-NEXT:    br label [[COND_END:%.*]]
872 // CHECK5:       cond.false:
873 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
874 // CHECK5-NEXT:    br label [[COND_END]]
875 // CHECK5:       cond.end:
876 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
877 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
878 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
879 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
880 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
881 // CHECK5:       omp.inner.for.cond:
882 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
883 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
884 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
885 // CHECK5:       omp.inner.for.body:
886 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
887 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
888 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
889 // CHECK5-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
890 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
891 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
892 // CHECK5-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
893 // CHECK5-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
894 // CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
895 // CHECK5-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
896 // CHECK5-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
897 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
898 // CHECK5-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
899 // CHECK5-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
900 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
901 // CHECK5-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
902 // CHECK5-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
903 // CHECK5-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
904 // CHECK5-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
905 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
906 // CHECK5:       omp.inner.for.inc:
907 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
908 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
909 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
910 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
911 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
912 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
913 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
914 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
915 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
916 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
917 // CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
918 // CHECK5-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
919 // CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
920 // CHECK5-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
921 // CHECK5-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
922 // CHECK5:       cond.true6:
923 // CHECK5-NEXT:    br label [[COND_END8:%.*]]
924 // CHECK5:       cond.false7:
925 // CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
926 // CHECK5-NEXT:    br label [[COND_END8]]
927 // CHECK5:       cond.end8:
928 // CHECK5-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
929 // CHECK5-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
930 // CHECK5-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
931 // CHECK5-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
932 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
933 // CHECK5:       omp.inner.for.end:
934 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
935 // CHECK5:       omp.loop.exit:
936 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
937 // CHECK5-NEXT:    ret void
938 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__7
939 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
940 // CHECK5-NEXT:  entry:
941 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
942 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
943 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
944 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
945 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
946 // CHECK5-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
947 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
948 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
949 // CHECK5-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
950 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
951 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
952 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
953 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
954 // CHECK5-NEXT:    [[K:%.*]] = alloca i32, align 4
955 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
956 // CHECK5-NEXT:    [[J:%.*]] = alloca i32, align 4
957 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
958 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
959 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
960 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
961 // CHECK5-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
962 // CHECK5-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
963 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
964 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
965 // CHECK5-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
966 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
967 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
968 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
969 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
970 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
971 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
972 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
973 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
974 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
975 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
976 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
977 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
978 // CHECK5:       omp.inner.for.cond:
979 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
980 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
981 // CHECK5-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
982 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
983 // CHECK5:       omp.inner.for.body:
984 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
985 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
986 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
987 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
988 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
989 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
990 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
991 // CHECK5-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
992 // CHECK5-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
993 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
994 // CHECK5-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
995 // CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
996 // CHECK5-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
997 // CHECK5-NEXT:    store i32 10, i32* [[K]], align 4
998 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
999 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
1000 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
1001 // CHECK5-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
1002 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
1003 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
1004 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
1005 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
1006 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
1007 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
1008 // CHECK5-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
1009 // CHECK5-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
1010 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1011 // CHECK5:       omp.body.continue:
1012 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1013 // CHECK5:       omp.inner.for.inc:
1014 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1015 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1016 // CHECK5-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
1017 // CHECK5-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
1018 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
1019 // CHECK5:       omp.inner.for.end:
1020 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1021 // CHECK5:       omp.loop.exit:
1022 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1023 // CHECK5-NEXT:    ret void
1024 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
1025 // CHECK5-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
1026 // CHECK5-NEXT:  entry:
1027 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1028 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
1029 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1030 // CHECK5-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1031 // CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1032 // CHECK5-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
1033 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1034 // CHECK5-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
1035 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
1036 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
1037 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
1038 // CHECK5-NEXT:    br label [[DOTEXECUTE:%.*]]
1039 // CHECK5:       .execute:
1040 // CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
1041 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1042 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
1043 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
1044 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
1045 // CHECK5-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
1046 // CHECK5-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
1047 // CHECK5:       .omp.deinit:
1048 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
1049 // CHECK5-NEXT:    br label [[DOTEXIT:%.*]]
1050 // CHECK5:       .exit:
1051 // CHECK5-NEXT:    ret void
1052 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__8
1053 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
1054 // CHECK5-NEXT:  entry:
1055 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1056 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1057 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1058 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
1059 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
1060 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1061 // CHECK5-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
1062 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1063 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1064 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
1065 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
1066 // CHECK5-NEXT:    [[J:%.*]] = alloca i32, align 4
1067 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
1068 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
1069 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
1070 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1071 // CHECK5-NEXT:    [[I9:%.*]] = alloca i32, align 4
1072 // CHECK5-NEXT:    [[J10:%.*]] = alloca i32, align 4
1073 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1074 // CHECK5-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
1075 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1076 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1077 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1078 // CHECK5-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
1079 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
1080 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1081 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
1082 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1083 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
1084 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1085 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
1086 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1087 // CHECK5-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
1088 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1089 // CHECK5-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
1090 // CHECK5-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
1091 // CHECK5-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
1092 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
1093 // CHECK5-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
1094 // CHECK5-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
1095 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
1096 // CHECK5-NEXT:    store i32 0, i32* [[J]], align 4
1097 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1098 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
1099 // CHECK5-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
1100 // CHECK5:       land.lhs.true:
1101 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1102 // CHECK5-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
1103 // CHECK5-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
1104 // CHECK5:       omp.precond.then:
1105 // CHECK5-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
1106 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1107 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
1108 // CHECK5-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
1109 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1110 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
1111 // CHECK5-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
1112 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1113 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1114 // CHECK5-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
1115 // CHECK5-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
1116 // CHECK5-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1117 // CHECK5-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
1118 // CHECK5-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1119 // CHECK5:       cond.true:
1120 // CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1121 // CHECK5-NEXT:    br label [[COND_END:%.*]]
1122 // CHECK5:       cond.false:
1123 // CHECK5-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
1124 // CHECK5-NEXT:    br label [[COND_END]]
1125 // CHECK5:       cond.end:
1126 // CHECK5-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1127 // CHECK5-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
1128 // CHECK5-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
1129 // CHECK5-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
1130 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1131 // CHECK5:       omp.inner.for.cond:
1132 // CHECK5-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1133 // CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1134 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
1135 // CHECK5-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
1136 // CHECK5-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1137 // CHECK5:       omp.inner.for.body:
1138 // CHECK5-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
1139 // CHECK5-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
1140 // CHECK5-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
1141 // CHECK5-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
1142 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
1143 // CHECK5-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
1144 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
1145 // CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1146 // CHECK5-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
1147 // CHECK5-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
1148 // CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1149 // CHECK5-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
1150 // CHECK5-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
1151 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
1152 // CHECK5-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
1153 // CHECK5-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
1154 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
1155 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
1156 // CHECK5-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
1157 // CHECK5-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1158 // CHECK5-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
1159 // CHECK5-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1160 // CHECK5-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
1161 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1162 // CHECK5:       omp.inner.for.inc:
1163 // CHECK5-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1164 // CHECK5-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
1165 // CHECK5-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
1166 // CHECK5-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
1167 // CHECK5-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
1168 // CHECK5-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
1169 // CHECK5-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
1170 // CHECK5-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
1171 // CHECK5-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
1172 // CHECK5-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
1173 // CHECK5-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
1174 // CHECK5-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
1175 // CHECK5-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
1176 // CHECK5-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1177 // CHECK5-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
1178 // CHECK5-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
1179 // CHECK5:       cond.true18:
1180 // CHECK5-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1181 // CHECK5-NEXT:    br label [[COND_END20:%.*]]
1182 // CHECK5:       cond.false19:
1183 // CHECK5-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
1184 // CHECK5-NEXT:    br label [[COND_END20]]
1185 // CHECK5:       cond.end20:
1186 // CHECK5-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
1187 // CHECK5-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
1188 // CHECK5-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
1189 // CHECK5-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
1190 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
1191 // CHECK5:       omp.inner.for.end:
1192 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1193 // CHECK5:       omp.loop.exit:
1194 // CHECK5-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1195 // CHECK5-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
1196 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
1197 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
1198 // CHECK5:       omp.precond.end:
1199 // CHECK5-NEXT:    ret void
1200 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__9
1201 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
1202 // CHECK5-NEXT:  entry:
1203 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1204 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1205 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
1206 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
1207 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1208 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
1209 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
1210 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1211 // CHECK5-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
1212 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1213 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1214 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
1215 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
1216 // CHECK5-NEXT:    [[J:%.*]] = alloca i32, align 4
1217 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
1218 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
1219 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
1220 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1221 // CHECK5-NEXT:    [[I11:%.*]] = alloca i32, align 4
1222 // CHECK5-NEXT:    [[J12:%.*]] = alloca i32, align 4
1223 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1224 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1225 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1226 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1227 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1228 // CHECK5-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
1229 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
1230 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1231 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
1232 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1233 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
1234 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1235 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
1236 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1237 // CHECK5-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
1238 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1239 // CHECK5-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
1240 // CHECK5-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
1241 // CHECK5-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
1242 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
1243 // CHECK5-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
1244 // CHECK5-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
1245 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
1246 // CHECK5-NEXT:    store i32 0, i32* [[J]], align 4
1247 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1248 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
1249 // CHECK5-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
1250 // CHECK5:       land.lhs.true:
1251 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1252 // CHECK5-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
1253 // CHECK5-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
1254 // CHECK5:       omp.precond.then:
1255 // CHECK5-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
1256 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1257 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
1258 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1259 // CHECK5-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
1260 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1261 // CHECK5-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
1262 // CHECK5-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
1263 // CHECK5-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
1264 // CHECK5-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
1265 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1266 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1267 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1268 // CHECK5-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
1269 // CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
1270 // CHECK5-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
1271 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1272 // CHECK5:       omp.inner.for.cond:
1273 // CHECK5-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1274 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1275 // CHECK5-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
1276 // CHECK5-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
1277 // CHECK5-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1278 // CHECK5:       omp.inner.for.body:
1279 // CHECK5-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1280 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1281 // CHECK5-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
1282 // CHECK5-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
1283 // CHECK5-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
1284 // CHECK5-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
1285 // CHECK5-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
1286 // CHECK5-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
1287 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
1288 // CHECK5-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
1289 // CHECK5-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
1290 // CHECK5-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1291 // CHECK5-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1292 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1293 // CHECK5-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
1294 // CHECK5-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
1295 // CHECK5-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
1296 // CHECK5-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
1297 // CHECK5-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
1298 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1299 // CHECK5-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
1300 // CHECK5-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
1301 // CHECK5-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
1302 // CHECK5-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
1303 // CHECK5-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
1304 // CHECK5-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
1305 // CHECK5-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
1306 // CHECK5-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
1307 // CHECK5-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
1308 // CHECK5-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
1309 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
1310 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
1311 // CHECK5-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1312 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
1313 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
1314 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
1315 // CHECK5-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
1316 // CHECK5-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
1317 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1318 // CHECK5:       omp.body.continue:
1319 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1320 // CHECK5:       omp.inner.for.inc:
1321 // CHECK5-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1322 // CHECK5-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
1323 // CHECK5-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
1324 // CHECK5-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
1325 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
1326 // CHECK5:       omp.inner.for.end:
1327 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1328 // CHECK5:       omp.loop.exit:
1329 // CHECK5-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1330 // CHECK5-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
1331 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
1332 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
1333 // CHECK5:       omp.precond.end:
1334 // CHECK5-NEXT:    ret void
1335 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
1336 // CHECK5-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
1337 // CHECK5-NEXT:  entry:
1338 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1339 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
1340 // CHECK5-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
1341 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1342 // CHECK5-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1343 // CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1344 // CHECK5-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
1345 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1346 // CHECK5-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
1347 // CHECK5-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
1348 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
1349 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
1350 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
1351 // CHECK5-NEXT:    br label [[DOTEXECUTE:%.*]]
1352 // CHECK5:       .execute:
1353 // CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
1354 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1355 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
1356 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
1357 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
1358 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
1359 // CHECK5-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
1360 // CHECK5-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
1361 // CHECK5:       .omp.deinit:
1362 // CHECK5-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
1363 // CHECK5-NEXT:    br label [[DOTEXIT:%.*]]
1364 // CHECK5:       .exit:
1365 // CHECK5-NEXT:    ret void
1366 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__10
1367 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
1368 // CHECK5-NEXT:  entry:
1369 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1370 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1371 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1372 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
1373 // CHECK5-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
1374 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1375 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1376 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1377 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1378 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
1379 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1380 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1381 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1382 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1383 // CHECK5-NEXT:    [[I3:%.*]] = alloca i32, align 4
1384 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1385 // CHECK5-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
1386 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1387 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1388 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1389 // CHECK5-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
1390 // CHECK5-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
1391 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
1392 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1393 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
1394 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1395 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
1396 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1397 // CHECK5-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1398 // CHECK5-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1399 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
1400 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1401 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
1402 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1403 // CHECK5:       omp.precond.then:
1404 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1405 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1406 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
1407 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1408 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1409 // CHECK5-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
1410 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1411 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1412 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
1413 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1414 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1415 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
1416 // CHECK5-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1417 // CHECK5:       cond.true:
1418 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1419 // CHECK5-NEXT:    br label [[COND_END:%.*]]
1420 // CHECK5:       cond.false:
1421 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1422 // CHECK5-NEXT:    br label [[COND_END]]
1423 // CHECK5:       cond.end:
1424 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
1425 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1426 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1427 // CHECK5-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
1428 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1429 // CHECK5:       omp.inner.for.cond:
1430 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1431 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1432 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
1433 // CHECK5-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
1434 // CHECK5-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1435 // CHECK5:       omp.inner.for.body:
1436 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1437 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1438 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
1439 // CHECK5-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
1440 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
1441 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
1442 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1443 // CHECK5-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
1444 // CHECK5-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
1445 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1446 // CHECK5-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
1447 // CHECK5-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
1448 // CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
1449 // CHECK5-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
1450 // CHECK5-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
1451 // CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
1452 // CHECK5-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
1453 // CHECK5-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
1454 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
1455 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
1456 // CHECK5-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
1457 // CHECK5-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1458 // CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
1459 // CHECK5-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1460 // CHECK5-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
1461 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1462 // CHECK5:       omp.inner.for.inc:
1463 // CHECK5-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1464 // CHECK5-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1465 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
1466 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
1467 // CHECK5-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1468 // CHECK5-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1469 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
1470 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
1471 // CHECK5-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1472 // CHECK5-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1473 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
1474 // CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
1475 // CHECK5-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1476 // CHECK5-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1477 // CHECK5-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
1478 // CHECK5-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
1479 // CHECK5:       cond.true10:
1480 // CHECK5-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1481 // CHECK5-NEXT:    br label [[COND_END12:%.*]]
1482 // CHECK5:       cond.false11:
1483 // CHECK5-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1484 // CHECK5-NEXT:    br label [[COND_END12]]
1485 // CHECK5:       cond.end12:
1486 // CHECK5-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
1487 // CHECK5-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
1488 // CHECK5-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1489 // CHECK5-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
1490 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
1491 // CHECK5:       omp.inner.for.end:
1492 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1493 // CHECK5:       omp.loop.exit:
1494 // CHECK5-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1495 // CHECK5-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
1496 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
1497 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
1498 // CHECK5:       omp.precond.end:
1499 // CHECK5-NEXT:    ret void
1500 // CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__11
1501 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
1502 // CHECK5-NEXT:  entry:
1503 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1504 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1505 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
1506 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
1507 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1508 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
1509 // CHECK5-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
1510 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1511 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1512 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1513 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1514 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
1515 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1516 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1517 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1518 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1519 // CHECK5-NEXT:    [[I3:%.*]] = alloca i32, align 4
1520 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1521 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1522 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1523 // CHECK5-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1524 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1525 // CHECK5-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
1526 // CHECK5-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
1527 // CHECK5-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
1528 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1529 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
1530 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1531 // CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
1532 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1533 // CHECK5-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1534 // CHECK5-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1535 // CHECK5-NEXT:    store i32 0, i32* [[I]], align 4
1536 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1537 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
1538 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1539 // CHECK5:       omp.precond.then:
1540 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1541 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1542 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
1543 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1544 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1545 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
1546 // CHECK5-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
1547 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1548 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1549 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1550 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
1551 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1552 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1553 // CHECK5-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
1554 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1555 // CHECK5:       omp.inner.for.cond:
1556 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1557 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1558 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
1559 // CHECK5-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1560 // CHECK5:       omp.inner.for.body:
1561 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1562 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
1563 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1564 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
1565 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
1566 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
1567 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
1568 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
1569 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
1570 // CHECK5-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
1571 // CHECK5-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
1572 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1573 // CHECK5:       omp.body.continue:
1574 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1575 // CHECK5:       omp.inner.for.inc:
1576 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1577 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1578 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
1579 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
1580 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
1581 // CHECK5:       omp.inner.for.end:
1582 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1583 // CHECK5:       omp.loop.exit:
1584 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1585 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
1586 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
1587 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
1588 // CHECK5:       omp.precond.end:
1589 // CHECK5-NEXT:    ret void
1590 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
1591 // CHECK6-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
1592 // CHECK6-NEXT:  entry:
1593 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1594 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
1595 // CHECK6-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
1596 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1597 // CHECK6-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
1598 // CHECK6-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1599 // CHECK6-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1600 // CHECK6-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
1601 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1602 // CHECK6-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
1603 // CHECK6-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
1604 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
1605 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
1606 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
1607 // CHECK6-NEXT:    br label [[DOTEXECUTE:%.*]]
1608 // CHECK6:       .execute:
1609 // CHECK6-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
1610 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1611 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
1612 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
1613 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
1614 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
1615 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
1616 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
1617 // CHECK6-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
1618 // CHECK6-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
1619 // CHECK6:       .omp.deinit:
1620 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
1621 // CHECK6-NEXT:    br label [[DOTEXIT:%.*]]
1622 // CHECK6:       .exit:
1623 // CHECK6-NEXT:    ret void
1624 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__
1625 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
1626 // CHECK6-NEXT:  entry:
1627 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1628 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1629 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1630 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
1631 // CHECK6-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
1632 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1633 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1634 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1635 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1636 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
1637 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1638 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1639 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1640 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1641 // CHECK6-NEXT:    [[I4:%.*]] = alloca i32, align 4
1642 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1643 // CHECK6-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
1644 // CHECK6-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
1645 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1646 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1647 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1648 // CHECK6-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
1649 // CHECK6-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
1650 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
1651 // CHECK6-NEXT:    [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
1652 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* @"_openmp_static_kernel$size", align 4
1653 // CHECK6-NEXT:    call void @__kmpc_get_team_static_memory(i16 1, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i32 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
1654 // CHECK6-NEXT:    [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 4
1655 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i32 0
1656 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
1657 // CHECK6-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
1658 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
1659 // CHECK6-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
1660 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1661 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
1662 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1663 // CHECK6-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
1664 // CHECK6-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
1665 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
1666 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1667 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
1668 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1669 // CHECK6:       omp.precond.then:
1670 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1671 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1672 // CHECK6-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
1673 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1674 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1675 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1676 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1677 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
1678 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1679 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1680 // CHECK6-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1681 // CHECK6-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1682 // CHECK6:       cond.true:
1683 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1684 // CHECK6-NEXT:    br label [[COND_END:%.*]]
1685 // CHECK6:       cond.false:
1686 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1687 // CHECK6-NEXT:    br label [[COND_END]]
1688 // CHECK6:       cond.end:
1689 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1690 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1691 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1692 // CHECK6-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1693 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1694 // CHECK6:       omp.inner.for.cond:
1695 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1696 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1697 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
1698 // CHECK6-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
1699 // CHECK6-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1700 // CHECK6:       omp.inner.for.body:
1701 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1702 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1703 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
1704 // CHECK6-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
1705 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
1706 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[L_ADDR]], align 4
1707 // CHECK6-NEXT:    store i32 [[TMP23]], i32* [[L_CASTED]], align 4
1708 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[L_CASTED]], align 4
1709 // CHECK6-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1710 // CHECK6-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP19]] to i8*
1711 // CHECK6-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
1712 // CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1713 // CHECK6-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP20]] to i8*
1714 // CHECK6-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
1715 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
1716 // CHECK6-NEXT:    [[TMP30:%.*]] = inttoptr i32 [[TMP22]] to i8*
1717 // CHECK6-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
1718 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
1719 // CHECK6-NEXT:    [[TMP32:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
1720 // CHECK6-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 4
1721 // CHECK6-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
1722 // CHECK6-NEXT:    [[TMP34:%.*]] = inttoptr i32 [[TMP24]] to i8*
1723 // CHECK6-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 4
1724 // CHECK6-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1725 // CHECK6-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
1726 // CHECK6-NEXT:    [[TMP37:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1727 // CHECK6-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP37]], i32 5)
1728 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1729 // CHECK6:       omp.inner.for.inc:
1730 // CHECK6-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1731 // CHECK6-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1732 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
1733 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
1734 // CHECK6-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1735 // CHECK6-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1736 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
1737 // CHECK6-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
1738 // CHECK6-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1739 // CHECK6-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1740 // CHECK6-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
1741 // CHECK6-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
1742 // CHECK6-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1743 // CHECK6-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1744 // CHECK6-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP44]], [[TMP45]]
1745 // CHECK6-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
1746 // CHECK6:       cond.true11:
1747 // CHECK6-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1748 // CHECK6-NEXT:    br label [[COND_END13:%.*]]
1749 // CHECK6:       cond.false12:
1750 // CHECK6-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1751 // CHECK6-NEXT:    br label [[COND_END13]]
1752 // CHECK6:       cond.end13:
1753 // CHECK6-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP46]], [[COND_TRUE11]] ], [ [[TMP47]], [[COND_FALSE12]] ]
1754 // CHECK6-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
1755 // CHECK6-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1756 // CHECK6-NEXT:    store i32 [[TMP48]], i32* [[DOTOMP_IV]], align 4
1757 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
1758 // CHECK6:       omp.inner.for.end:
1759 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1760 // CHECK6:       omp.loop.exit:
1761 // CHECK6-NEXT:    [[TMP49:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1762 // CHECK6-NEXT:    [[TMP50:%.*]] = load i32, i32* [[TMP49]], align 4
1763 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP50]])
1764 // CHECK6-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1765 // CHECK6-NEXT:    [[TMP52:%.*]] = icmp ne i32 [[TMP51]], 0
1766 // CHECK6-NEXT:    br i1 [[TMP52]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
1767 // CHECK6:       .omp.lastprivate.then:
1768 // CHECK6-NEXT:    [[TMP53:%.*]] = load i32, i32* [[L_ADDR]], align 4
1769 // CHECK6-NEXT:    store i32 [[TMP53]], i32* [[L_ADDR]], align 4
1770 // CHECK6-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
1771 // CHECK6:       .omp.lastprivate.done:
1772 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
1773 // CHECK6:       omp.precond.end:
1774 // CHECK6-NEXT:    [[TMP54:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
1775 // CHECK6-NEXT:    call void @__kmpc_restore_team_static_memory(i16 1, i16 [[TMP54]])
1776 // CHECK6-NEXT:    ret void
1777 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__1
1778 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
1779 // CHECK6-NEXT:  entry:
1780 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1781 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1782 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
1783 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
1784 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1785 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
1786 // CHECK6-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
1787 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1788 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1789 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1790 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1791 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
1792 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1793 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1794 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1795 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1796 // CHECK6-NEXT:    [[I3:%.*]] = alloca i32, align 4
1797 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1798 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1799 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1800 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1801 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1802 // CHECK6-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
1803 // CHECK6-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
1804 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
1805 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1806 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
1807 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1808 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
1809 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1810 // CHECK6-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1811 // CHECK6-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1812 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
1813 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1814 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
1815 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1816 // CHECK6:       omp.precond.then:
1817 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1818 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1819 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
1820 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1821 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1822 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
1823 // CHECK6-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
1824 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1825 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1826 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1827 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
1828 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
1829 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
1830 // CHECK6:       omp.dispatch.cond:
1831 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1832 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1833 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
1834 // CHECK6-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1835 // CHECK6:       cond.true:
1836 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1837 // CHECK6-NEXT:    br label [[COND_END:%.*]]
1838 // CHECK6:       cond.false:
1839 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1840 // CHECK6-NEXT:    br label [[COND_END]]
1841 // CHECK6:       cond.end:
1842 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
1843 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1844 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1845 // CHECK6-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
1846 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1847 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1848 // CHECK6-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
1849 // CHECK6-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1850 // CHECK6:       omp.dispatch.body:
1851 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1852 // CHECK6:       omp.inner.for.cond:
1853 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1854 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1855 // CHECK6-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
1856 // CHECK6-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1857 // CHECK6:       omp.inner.for.body:
1858 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1859 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
1860 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1861 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
1862 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
1863 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
1864 // CHECK6-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
1865 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
1866 // CHECK6-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
1867 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1868 // CHECK6:       omp.body.continue:
1869 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1870 // CHECK6:       omp.inner.for.inc:
1871 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1872 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
1873 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
1874 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
1875 // CHECK6:       omp.inner.for.end:
1876 // CHECK6-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
1877 // CHECK6:       omp.dispatch.inc:
1878 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1879 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1880 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
1881 // CHECK6-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
1882 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1883 // CHECK6-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1884 // CHECK6-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
1885 // CHECK6-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
1886 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND]]
1887 // CHECK6:       omp.dispatch.end:
1888 // CHECK6-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1889 // CHECK6-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
1890 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
1891 // CHECK6-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1892 // CHECK6-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
1893 // CHECK6-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
1894 // CHECK6:       .omp.lastprivate.then:
1895 // CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
1896 // CHECK6-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
1897 // CHECK6-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
1898 // CHECK6:       .omp.lastprivate.done:
1899 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
1900 // CHECK6:       omp.precond.end:
1901 // CHECK6-NEXT:    ret void
1902 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
1903 // CHECK6-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
1904 // CHECK6-NEXT:  entry:
1905 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1906 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
1907 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1908 // CHECK6-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1909 // CHECK6-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1910 // CHECK6-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
1911 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1912 // CHECK6-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
1913 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
1914 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
1915 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
1916 // CHECK6-NEXT:    br label [[DOTEXECUTE:%.*]]
1917 // CHECK6:       .execute:
1918 // CHECK6-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
1919 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1920 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
1921 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
1922 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
1923 // CHECK6-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
1924 // CHECK6-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
1925 // CHECK6:       .omp.deinit:
1926 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
1927 // CHECK6-NEXT:    br label [[DOTEXIT:%.*]]
1928 // CHECK6:       .exit:
1929 // CHECK6-NEXT:    ret void
1930 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__2
1931 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
1932 // CHECK6-NEXT:  entry:
1933 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1934 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1935 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1936 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
1937 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1938 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1939 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1940 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1941 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
1942 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1943 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1944 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1945 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1946 // CHECK6-NEXT:    [[I3:%.*]] = alloca i32, align 4
1947 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1948 // CHECK6-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
1949 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1950 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1951 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1952 // CHECK6-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
1953 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
1954 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1955 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
1956 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1957 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
1958 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1959 // CHECK6-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1960 // CHECK6-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1961 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
1962 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1963 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
1964 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1965 // CHECK6:       omp.precond.then:
1966 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1967 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1968 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
1969 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1970 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1971 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
1972 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1973 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1974 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
1975 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1976 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1977 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
1978 // CHECK6-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1979 // CHECK6:       cond.true:
1980 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1981 // CHECK6-NEXT:    br label [[COND_END:%.*]]
1982 // CHECK6:       cond.false:
1983 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1984 // CHECK6-NEXT:    br label [[COND_END]]
1985 // CHECK6:       cond.end:
1986 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
1987 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1988 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1989 // CHECK6-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
1990 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1991 // CHECK6:       omp.inner.for.cond:
1992 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1993 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1994 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
1995 // CHECK6-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
1996 // CHECK6-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1997 // CHECK6:       omp.inner.for.body:
1998 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1999 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2000 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
2001 // CHECK6-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
2002 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
2003 // CHECK6-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
2004 // CHECK6-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
2005 // CHECK6-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
2006 // CHECK6-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
2007 // CHECK6-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
2008 // CHECK6-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
2009 // CHECK6-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
2010 // CHECK6-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
2011 // CHECK6-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
2012 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
2013 // CHECK6-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
2014 // CHECK6-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
2015 // CHECK6-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2016 // CHECK6-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
2017 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2018 // CHECK6-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
2019 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2020 // CHECK6:       omp.inner.for.inc:
2021 // CHECK6-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2022 // CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2023 // CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
2024 // CHECK6-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
2025 // CHECK6-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2026 // CHECK6-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2027 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
2028 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
2029 // CHECK6-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2030 // CHECK6-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2031 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
2032 // CHECK6-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
2033 // CHECK6-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2034 // CHECK6-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2035 // CHECK6-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
2036 // CHECK6-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
2037 // CHECK6:       cond.true10:
2038 // CHECK6-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2039 // CHECK6-NEXT:    br label [[COND_END12:%.*]]
2040 // CHECK6:       cond.false11:
2041 // CHECK6-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2042 // CHECK6-NEXT:    br label [[COND_END12]]
2043 // CHECK6:       cond.end12:
2044 // CHECK6-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
2045 // CHECK6-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
2046 // CHECK6-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2047 // CHECK6-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
2048 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2049 // CHECK6:       omp.inner.for.end:
2050 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2051 // CHECK6:       omp.loop.exit:
2052 // CHECK6-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2053 // CHECK6-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
2054 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
2055 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
2056 // CHECK6:       omp.precond.end:
2057 // CHECK6-NEXT:    ret void
2058 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__3
2059 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
2060 // CHECK6-NEXT:  entry:
2061 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2062 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2063 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2064 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2065 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2066 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
2067 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2068 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2069 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2070 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2071 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2072 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2073 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2074 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2075 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2076 // CHECK6-NEXT:    [[I3:%.*]] = alloca i32, align 4
2077 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2078 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2079 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2080 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2081 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2082 // CHECK6-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
2083 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
2084 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
2085 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
2086 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2087 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
2088 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2089 // CHECK6-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2090 // CHECK6-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2091 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
2092 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2093 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
2094 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2095 // CHECK6:       omp.precond.then:
2096 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2097 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2098 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
2099 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2100 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2101 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
2102 // CHECK6-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
2103 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2104 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2105 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2106 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
2107 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2108 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2109 // CHECK6-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
2110 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2111 // CHECK6:       omp.inner.for.cond:
2112 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2113 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2114 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
2115 // CHECK6-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2116 // CHECK6:       omp.inner.for.body:
2117 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2118 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
2119 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2120 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
2121 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
2122 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
2123 // CHECK6-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
2124 // CHECK6-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
2125 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
2126 // CHECK6-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
2127 // CHECK6-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
2128 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2129 // CHECK6:       omp.body.continue:
2130 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2131 // CHECK6:       omp.inner.for.inc:
2132 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2133 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2134 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
2135 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
2136 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2137 // CHECK6:       omp.inner.for.end:
2138 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2139 // CHECK6:       omp.loop.exit:
2140 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2141 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
2142 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
2143 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
2144 // CHECK6:       omp.precond.end:
2145 // CHECK6-NEXT:    ret void
2146 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
2147 // CHECK6-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
2148 // CHECK6-NEXT:  entry:
2149 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
2150 // CHECK6-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2151 // CHECK6-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2152 // CHECK6-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
2153 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
2154 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
2155 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2156 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
2157 // CHECK6-NEXT:    br label [[DOTEXECUTE:%.*]]
2158 // CHECK6:       .execute:
2159 // CHECK6-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
2160 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
2161 // CHECK6-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
2162 // CHECK6-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
2163 // CHECK6:       .omp.deinit:
2164 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
2165 // CHECK6-NEXT:    br label [[DOTEXIT:%.*]]
2166 // CHECK6:       .exit:
2167 // CHECK6-NEXT:    ret void
2168 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__4
2169 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
2170 // CHECK6-NEXT:  entry:
2171 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2172 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2173 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
2174 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2175 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2176 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2177 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2178 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2179 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2180 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2181 // CHECK6-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
2182 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2183 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2184 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
2185 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
2186 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2187 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
2188 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2189 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2190 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2191 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2192 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2193 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
2194 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2195 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
2196 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2197 // CHECK6:       cond.true:
2198 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2199 // CHECK6:       cond.false:
2200 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2201 // CHECK6-NEXT:    br label [[COND_END]]
2202 // CHECK6:       cond.end:
2203 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2204 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2205 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2206 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2207 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2208 // CHECK6:       omp.inner.for.cond:
2209 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2210 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
2211 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2212 // CHECK6:       omp.inner.for.body:
2213 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2214 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2215 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
2216 // CHECK6-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
2217 // CHECK6-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
2218 // CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
2219 // CHECK6-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
2220 // CHECK6-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
2221 // CHECK6-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
2222 // CHECK6-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
2223 // CHECK6-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
2224 // CHECK6-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2225 // CHECK6-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
2226 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2227 // CHECK6:       omp.inner.for.inc:
2228 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2229 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2230 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
2231 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2232 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2233 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2234 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
2235 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
2236 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2237 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2238 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2239 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
2240 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2241 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
2242 // CHECK6-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
2243 // CHECK6:       cond.true5:
2244 // CHECK6-NEXT:    br label [[COND_END7:%.*]]
2245 // CHECK6:       cond.false6:
2246 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2247 // CHECK6-NEXT:    br label [[COND_END7]]
2248 // CHECK6:       cond.end7:
2249 // CHECK6-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
2250 // CHECK6-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
2251 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2252 // CHECK6-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
2253 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2254 // CHECK6:       omp.inner.for.end:
2255 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2256 // CHECK6:       omp.loop.exit:
2257 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2258 // CHECK6-NEXT:    ret void
2259 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__5
2260 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
2261 // CHECK6-NEXT:  entry:
2262 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2263 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2264 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2265 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2266 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
2267 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2268 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2269 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2270 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2271 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2272 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2273 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2274 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2275 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2276 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2277 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2278 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
2279 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
2280 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2281 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
2282 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2283 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2284 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2285 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2286 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2287 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2288 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2289 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2290 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2291 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2292 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2293 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2294 // CHECK6:       omp.inner.for.cond:
2295 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2296 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2297 // CHECK6-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
2298 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2299 // CHECK6:       omp.inner.for.body:
2300 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2301 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
2302 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2303 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2304 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
2305 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
2306 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
2307 // CHECK6-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
2308 // CHECK6-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
2309 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2310 // CHECK6:       omp.body.continue:
2311 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2312 // CHECK6:       omp.inner.for.inc:
2313 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2314 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2315 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
2316 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
2317 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2318 // CHECK6:       omp.inner.for.end:
2319 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2320 // CHECK6:       omp.loop.exit:
2321 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2322 // CHECK6-NEXT:    ret void
2323 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
2324 // CHECK6-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
2325 // CHECK6-NEXT:  entry:
2326 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
2327 // CHECK6-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
2328 // CHECK6-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
2329 // CHECK6-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2330 // CHECK6-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2331 // CHECK6-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
2332 // CHECK6-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
2333 // CHECK6-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
2334 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
2335 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2336 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
2337 // CHECK6-NEXT:    br label [[DOTEXECUTE:%.*]]
2338 // CHECK6:       .execute:
2339 // CHECK6-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
2340 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
2341 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
2342 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
2343 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
2344 // CHECK6-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
2345 // CHECK6-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
2346 // CHECK6:       .omp.deinit:
2347 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
2348 // CHECK6-NEXT:    br label [[DOTEXIT:%.*]]
2349 // CHECK6:       .exit:
2350 // CHECK6-NEXT:    ret void
2351 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__6
2352 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
2353 // CHECK6-NEXT:  entry:
2354 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2355 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2356 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
2357 // CHECK6-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
2358 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2359 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2360 // CHECK6-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
2361 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2362 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2363 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2364 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2365 // CHECK6-NEXT:    [[K:%.*]] = alloca i32, align 4
2366 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2367 // CHECK6-NEXT:    [[J:%.*]] = alloca i32, align 4
2368 // CHECK6-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
2369 // CHECK6-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
2370 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2371 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2372 // CHECK6-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
2373 // CHECK6-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
2374 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
2375 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2376 // CHECK6-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
2377 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2378 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2379 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2380 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2381 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2382 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
2383 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2384 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
2385 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2386 // CHECK6:       cond.true:
2387 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2388 // CHECK6:       cond.false:
2389 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2390 // CHECK6-NEXT:    br label [[COND_END]]
2391 // CHECK6:       cond.end:
2392 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2393 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2394 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2395 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2396 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2397 // CHECK6:       omp.inner.for.cond:
2398 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2399 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
2400 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2401 // CHECK6:       omp.inner.for.body:
2402 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2403 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2404 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
2405 // CHECK6-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
2406 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
2407 // CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
2408 // CHECK6-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
2409 // CHECK6-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
2410 // CHECK6-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
2411 // CHECK6-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
2412 // CHECK6-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
2413 // CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
2414 // CHECK6-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
2415 // CHECK6-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
2416 // CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
2417 // CHECK6-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
2418 // CHECK6-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
2419 // CHECK6-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2420 // CHECK6-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
2421 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2422 // CHECK6:       omp.inner.for.inc:
2423 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2424 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2425 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2426 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2427 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2428 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2429 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
2430 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
2431 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2432 // CHECK6-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2433 // CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
2434 // CHECK6-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
2435 // CHECK6-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2436 // CHECK6-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
2437 // CHECK6-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
2438 // CHECK6:       cond.true6:
2439 // CHECK6-NEXT:    br label [[COND_END8:%.*]]
2440 // CHECK6:       cond.false7:
2441 // CHECK6-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2442 // CHECK6-NEXT:    br label [[COND_END8]]
2443 // CHECK6:       cond.end8:
2444 // CHECK6-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
2445 // CHECK6-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
2446 // CHECK6-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2447 // CHECK6-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
2448 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2449 // CHECK6:       omp.inner.for.end:
2450 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2451 // CHECK6:       omp.loop.exit:
2452 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2453 // CHECK6-NEXT:    ret void
2454 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__7
2455 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
2456 // CHECK6-NEXT:  entry:
2457 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2458 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2459 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2460 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2461 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
2462 // CHECK6-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
2463 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2464 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2465 // CHECK6-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
2466 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2467 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2468 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2469 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2470 // CHECK6-NEXT:    [[K:%.*]] = alloca i32, align 4
2471 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2472 // CHECK6-NEXT:    [[J:%.*]] = alloca i32, align 4
2473 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2474 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2475 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2476 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2477 // CHECK6-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
2478 // CHECK6-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
2479 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
2480 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2481 // CHECK6-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
2482 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2483 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2484 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
2485 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
2486 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2487 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2488 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2489 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
2490 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2491 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2492 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
2493 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2494 // CHECK6:       omp.inner.for.cond:
2495 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2496 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2497 // CHECK6-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
2498 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2499 // CHECK6:       omp.inner.for.body:
2500 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2501 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
2502 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
2503 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2504 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2505 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2506 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2507 // CHECK6-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
2508 // CHECK6-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
2509 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
2510 // CHECK6-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
2511 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
2512 // CHECK6-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
2513 // CHECK6-NEXT:    store i32 10, i32* [[K]], align 4
2514 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
2515 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
2516 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
2517 // CHECK6-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
2518 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
2519 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
2520 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
2521 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
2522 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
2523 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
2524 // CHECK6-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
2525 // CHECK6-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
2526 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2527 // CHECK6:       omp.body.continue:
2528 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2529 // CHECK6:       omp.inner.for.inc:
2530 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2531 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2532 // CHECK6-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
2533 // CHECK6-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
2534 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2535 // CHECK6:       omp.inner.for.end:
2536 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2537 // CHECK6:       omp.loop.exit:
2538 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
2539 // CHECK6-NEXT:    ret void
2540 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
2541 // CHECK6-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
2542 // CHECK6-NEXT:  entry:
2543 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2544 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
2545 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
2546 // CHECK6-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2547 // CHECK6-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2548 // CHECK6-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
2549 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2550 // CHECK6-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
2551 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
2552 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2553 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
2554 // CHECK6-NEXT:    br label [[DOTEXECUTE:%.*]]
2555 // CHECK6:       .execute:
2556 // CHECK6-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
2557 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
2558 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
2559 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
2560 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
2561 // CHECK6-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
2562 // CHECK6-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
2563 // CHECK6:       .omp.deinit:
2564 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
2565 // CHECK6-NEXT:    br label [[DOTEXIT:%.*]]
2566 // CHECK6:       .exit:
2567 // CHECK6-NEXT:    ret void
2568 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__8
2569 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
2570 // CHECK6-NEXT:  entry:
2571 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2572 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2573 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2574 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
2575 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
2576 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2577 // CHECK6-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
2578 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2579 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2580 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
2581 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2582 // CHECK6-NEXT:    [[J:%.*]] = alloca i32, align 4
2583 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
2584 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
2585 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
2586 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2587 // CHECK6-NEXT:    [[I9:%.*]] = alloca i32, align 4
2588 // CHECK6-NEXT:    [[J10:%.*]] = alloca i32, align 4
2589 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
2590 // CHECK6-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
2591 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2592 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2593 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2594 // CHECK6-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
2595 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
2596 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
2597 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
2598 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
2599 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2600 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2601 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
2602 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2603 // CHECK6-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
2604 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2605 // CHECK6-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
2606 // CHECK6-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
2607 // CHECK6-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
2608 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
2609 // CHECK6-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
2610 // CHECK6-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
2611 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
2612 // CHECK6-NEXT:    store i32 0, i32* [[J]], align 4
2613 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2614 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
2615 // CHECK6-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
2616 // CHECK6:       land.lhs.true:
2617 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2618 // CHECK6-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
2619 // CHECK6-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
2620 // CHECK6:       omp.precond.then:
2621 // CHECK6-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
2622 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
2623 // CHECK6-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
2624 // CHECK6-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
2625 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2626 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2627 // CHECK6-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
2628 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2629 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2630 // CHECK6-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
2631 // CHECK6-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
2632 // CHECK6-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
2633 // CHECK6-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
2634 // CHECK6-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2635 // CHECK6:       cond.true:
2636 // CHECK6-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
2637 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2638 // CHECK6:       cond.false:
2639 // CHECK6-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
2640 // CHECK6-NEXT:    br label [[COND_END]]
2641 // CHECK6:       cond.end:
2642 // CHECK6-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2643 // CHECK6-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
2644 // CHECK6-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
2645 // CHECK6-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
2646 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2647 // CHECK6:       omp.inner.for.cond:
2648 // CHECK6-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
2649 // CHECK6-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
2650 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
2651 // CHECK6-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
2652 // CHECK6-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2653 // CHECK6:       omp.inner.for.body:
2654 // CHECK6-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
2655 // CHECK6-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
2656 // CHECK6-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
2657 // CHECK6-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
2658 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
2659 // CHECK6-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
2660 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
2661 // CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
2662 // CHECK6-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
2663 // CHECK6-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
2664 // CHECK6-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
2665 // CHECK6-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
2666 // CHECK6-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
2667 // CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
2668 // CHECK6-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
2669 // CHECK6-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
2670 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
2671 // CHECK6-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
2672 // CHECK6-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
2673 // CHECK6-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2674 // CHECK6-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
2675 // CHECK6-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2676 // CHECK6-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
2677 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2678 // CHECK6:       omp.inner.for.inc:
2679 // CHECK6-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
2680 // CHECK6-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
2681 // CHECK6-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
2682 // CHECK6-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
2683 // CHECK6-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
2684 // CHECK6-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
2685 // CHECK6-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
2686 // CHECK6-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
2687 // CHECK6-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
2688 // CHECK6-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
2689 // CHECK6-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
2690 // CHECK6-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
2691 // CHECK6-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
2692 // CHECK6-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
2693 // CHECK6-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
2694 // CHECK6-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
2695 // CHECK6:       cond.true18:
2696 // CHECK6-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
2697 // CHECK6-NEXT:    br label [[COND_END20:%.*]]
2698 // CHECK6:       cond.false19:
2699 // CHECK6-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
2700 // CHECK6-NEXT:    br label [[COND_END20]]
2701 // CHECK6:       cond.end20:
2702 // CHECK6-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
2703 // CHECK6-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
2704 // CHECK6-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
2705 // CHECK6-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
2706 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2707 // CHECK6:       omp.inner.for.end:
2708 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2709 // CHECK6:       omp.loop.exit:
2710 // CHECK6-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2711 // CHECK6-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
2712 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
2713 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
2714 // CHECK6:       omp.precond.end:
2715 // CHECK6-NEXT:    ret void
2716 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__9
2717 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
2718 // CHECK6-NEXT:  entry:
2719 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2720 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2721 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2722 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2723 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2724 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
2725 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
2726 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2727 // CHECK6-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
2728 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2729 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2730 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
2731 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2732 // CHECK6-NEXT:    [[J:%.*]] = alloca i32, align 4
2733 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
2734 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
2735 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
2736 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2737 // CHECK6-NEXT:    [[I11:%.*]] = alloca i32, align 4
2738 // CHECK6-NEXT:    [[J12:%.*]] = alloca i32, align 4
2739 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2740 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2741 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2742 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2743 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2744 // CHECK6-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
2745 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
2746 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
2747 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
2748 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
2749 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2750 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2751 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
2752 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2753 // CHECK6-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
2754 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2755 // CHECK6-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
2756 // CHECK6-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
2757 // CHECK6-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
2758 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
2759 // CHECK6-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
2760 // CHECK6-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
2761 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
2762 // CHECK6-NEXT:    store i32 0, i32* [[J]], align 4
2763 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2764 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
2765 // CHECK6-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
2766 // CHECK6:       land.lhs.true:
2767 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2768 // CHECK6-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
2769 // CHECK6-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
2770 // CHECK6:       omp.precond.then:
2771 // CHECK6-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
2772 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
2773 // CHECK6-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
2774 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2775 // CHECK6-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
2776 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2777 // CHECK6-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
2778 // CHECK6-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
2779 // CHECK6-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
2780 // CHECK6-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
2781 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2782 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2783 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2784 // CHECK6-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
2785 // CHECK6-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
2786 // CHECK6-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
2787 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2788 // CHECK6:       omp.inner.for.cond:
2789 // CHECK6-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
2790 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2791 // CHECK6-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
2792 // CHECK6-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
2793 // CHECK6-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2794 // CHECK6:       omp.inner.for.body:
2795 // CHECK6-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
2796 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2797 // CHECK6-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
2798 // CHECK6-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
2799 // CHECK6-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
2800 // CHECK6-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
2801 // CHECK6-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
2802 // CHECK6-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
2803 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
2804 // CHECK6-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
2805 // CHECK6-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
2806 // CHECK6-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
2807 // CHECK6-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
2808 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2809 // CHECK6-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
2810 // CHECK6-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
2811 // CHECK6-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
2812 // CHECK6-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
2813 // CHECK6-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
2814 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2815 // CHECK6-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
2816 // CHECK6-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
2817 // CHECK6-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
2818 // CHECK6-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
2819 // CHECK6-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
2820 // CHECK6-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
2821 // CHECK6-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
2822 // CHECK6-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
2823 // CHECK6-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
2824 // CHECK6-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
2825 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
2826 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
2827 // CHECK6-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2828 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
2829 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
2830 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
2831 // CHECK6-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
2832 // CHECK6-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
2833 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2834 // CHECK6:       omp.body.continue:
2835 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2836 // CHECK6:       omp.inner.for.inc:
2837 // CHECK6-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
2838 // CHECK6-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
2839 // CHECK6-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
2840 // CHECK6-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
2841 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
2842 // CHECK6:       omp.inner.for.end:
2843 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2844 // CHECK6:       omp.loop.exit:
2845 // CHECK6-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2846 // CHECK6-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2847 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2848 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
2849 // CHECK6:       omp.precond.end:
2850 // CHECK6-NEXT:    ret void
2851 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
2852 // CHECK6-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
2853 // CHECK6-NEXT:  entry:
2854 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2855 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
2856 // CHECK6-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
2857 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
2858 // CHECK6-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2859 // CHECK6-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2860 // CHECK6-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
2861 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2862 // CHECK6-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
2863 // CHECK6-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
2864 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
2865 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2866 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
2867 // CHECK6-NEXT:    br label [[DOTEXECUTE:%.*]]
2868 // CHECK6:       .execute:
2869 // CHECK6-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
2870 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
2871 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
2872 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
2873 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
2874 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
2875 // CHECK6-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
2876 // CHECK6-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
2877 // CHECK6:       .omp.deinit:
2878 // CHECK6-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
2879 // CHECK6-NEXT:    br label [[DOTEXIT:%.*]]
2880 // CHECK6:       .exit:
2881 // CHECK6-NEXT:    ret void
2882 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__10
2883 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
2884 // CHECK6-NEXT:  entry:
2885 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2886 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2887 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2888 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
2889 // CHECK6-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
2890 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2891 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2892 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2893 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2894 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2895 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2896 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2897 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2898 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2899 // CHECK6-NEXT:    [[I3:%.*]] = alloca i32, align 4
2900 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
2901 // CHECK6-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
2902 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2903 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2904 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2905 // CHECK6-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
2906 // CHECK6-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
2907 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
2908 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
2909 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
2910 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2911 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
2912 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2913 // CHECK6-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2914 // CHECK6-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2915 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
2916 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2917 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
2918 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2919 // CHECK6:       omp.precond.then:
2920 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2921 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2922 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
2923 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2924 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2925 // CHECK6-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
2926 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2927 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
2928 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
2929 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2930 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2931 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
2932 // CHECK6-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2933 // CHECK6:       cond.true:
2934 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2935 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2936 // CHECK6:       cond.false:
2937 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2938 // CHECK6-NEXT:    br label [[COND_END]]
2939 // CHECK6:       cond.end:
2940 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
2941 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2942 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2943 // CHECK6-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
2944 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2945 // CHECK6:       omp.inner.for.cond:
2946 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2947 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2948 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
2949 // CHECK6-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
2950 // CHECK6-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2951 // CHECK6:       omp.inner.for.body:
2952 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2953 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2954 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
2955 // CHECK6-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
2956 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
2957 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
2958 // CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
2959 // CHECK6-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
2960 // CHECK6-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
2961 // CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
2962 // CHECK6-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
2963 // CHECK6-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
2964 // CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
2965 // CHECK6-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
2966 // CHECK6-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
2967 // CHECK6-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
2968 // CHECK6-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
2969 // CHECK6-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
2970 // CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
2971 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
2972 // CHECK6-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
2973 // CHECK6-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2974 // CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
2975 // CHECK6-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2976 // CHECK6-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
2977 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2978 // CHECK6:       omp.inner.for.inc:
2979 // CHECK6-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2980 // CHECK6-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2981 // CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
2982 // CHECK6-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
2983 // CHECK6-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2984 // CHECK6-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2985 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
2986 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
2987 // CHECK6-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2988 // CHECK6-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2989 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
2990 // CHECK6-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
2991 // CHECK6-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2992 // CHECK6-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2993 // CHECK6-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
2994 // CHECK6-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
2995 // CHECK6:       cond.true10:
2996 // CHECK6-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2997 // CHECK6-NEXT:    br label [[COND_END12:%.*]]
2998 // CHECK6:       cond.false11:
2999 // CHECK6-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3000 // CHECK6-NEXT:    br label [[COND_END12]]
3001 // CHECK6:       cond.end12:
3002 // CHECK6-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
3003 // CHECK6-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
3004 // CHECK6-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3005 // CHECK6-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
3006 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
3007 // CHECK6:       omp.inner.for.end:
3008 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3009 // CHECK6:       omp.loop.exit:
3010 // CHECK6-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3011 // CHECK6-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
3012 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
3013 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
3014 // CHECK6:       omp.precond.end:
3015 // CHECK6-NEXT:    ret void
3016 // CHECK6-LABEL: define {{[^@]+}}@__omp_outlined__11
3017 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
3018 // CHECK6-NEXT:  entry:
3019 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3020 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3021 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3022 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3023 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3024 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
3025 // CHECK6-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
3026 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3027 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3028 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3029 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3030 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
3031 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3032 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3033 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3034 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3035 // CHECK6-NEXT:    [[I3:%.*]] = alloca i32, align 4
3036 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3037 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3038 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3039 // CHECK6-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3040 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3041 // CHECK6-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
3042 // CHECK6-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
3043 // CHECK6-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
3044 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
3045 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
3046 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3047 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
3048 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3049 // CHECK6-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3050 // CHECK6-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3051 // CHECK6-NEXT:    store i32 0, i32* [[I]], align 4
3052 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3053 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
3054 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3055 // CHECK6:       omp.precond.then:
3056 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3057 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3058 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
3059 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3060 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3061 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
3062 // CHECK6-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
3063 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3064 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3065 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3066 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
3067 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3068 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3069 // CHECK6-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
3070 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3071 // CHECK6:       omp.inner.for.cond:
3072 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3073 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3074 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
3075 // CHECK6-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3076 // CHECK6:       omp.inner.for.body:
3077 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3078 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
3079 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3080 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
3081 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
3082 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
3083 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
3084 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
3085 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
3086 // CHECK6-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
3087 // CHECK6-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
3088 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3089 // CHECK6:       omp.body.continue:
3090 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3091 // CHECK6:       omp.inner.for.inc:
3092 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3093 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3094 // CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
3095 // CHECK6-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
3096 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
3097 // CHECK6:       omp.inner.for.end:
3098 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3099 // CHECK6:       omp.loop.exit:
3100 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3101 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
3102 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
3103 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
3104 // CHECK6:       omp.precond.end:
3105 // CHECK6-NEXT:    ret void
3106 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
3107 // CHECK7-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
3108 // CHECK7-NEXT:  entry:
3109 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3110 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
3111 // CHECK7-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
3112 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
3113 // CHECK7-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
3114 // CHECK7-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3115 // CHECK7-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3116 // CHECK7-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
3117 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3118 // CHECK7-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
3119 // CHECK7-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
3120 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
3121 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
3122 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
3123 // CHECK7-NEXT:    br label [[DOTEXECUTE:%.*]]
3124 // CHECK7:       .execute:
3125 // CHECK7-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
3126 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
3127 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
3128 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
3129 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
3130 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
3131 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
3132 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3133 // CHECK7-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
3134 // CHECK7-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
3135 // CHECK7:       .omp.deinit:
3136 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
3137 // CHECK7-NEXT:    br label [[DOTEXIT:%.*]]
3138 // CHECK7:       .exit:
3139 // CHECK7-NEXT:    ret void
3140 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__
3141 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
3142 // CHECK7-NEXT:  entry:
3143 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3144 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3145 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3146 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
3147 // CHECK7-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
3148 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3149 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3150 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3151 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3152 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3153 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3154 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3155 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3156 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3157 // CHECK7-NEXT:    [[I4:%.*]] = alloca i32, align 4
3158 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
3159 // CHECK7-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
3160 // CHECK7-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
3161 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3162 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3163 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3164 // CHECK7-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
3165 // CHECK7-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
3166 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
3167 // CHECK7-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i32 4, i16 1)
3168 // CHECK7-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
3169 // CHECK7-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
3170 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
3171 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
3172 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3173 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
3174 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3175 // CHECK7-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
3176 // CHECK7-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3177 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
3178 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3179 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
3180 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3181 // CHECK7:       omp.precond.then:
3182 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3183 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3184 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_COMB_UB]], align 4
3185 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3186 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3187 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3188 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
3189 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
3190 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3191 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3192 // CHECK7-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
3193 // CHECK7-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3194 // CHECK7:       cond.true:
3195 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3196 // CHECK7-NEXT:    br label [[COND_END:%.*]]
3197 // CHECK7:       cond.false:
3198 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3199 // CHECK7-NEXT:    br label [[COND_END]]
3200 // CHECK7:       cond.end:
3201 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
3202 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3203 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3204 // CHECK7-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
3205 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3206 // CHECK7:       omp.inner.for.cond:
3207 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3208 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3209 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
3210 // CHECK7-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
3211 // CHECK7-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3212 // CHECK7:       omp.inner.for.body:
3213 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3214 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3215 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
3216 // CHECK7-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4
3217 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
3218 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[L_ADDR]], align 4
3219 // CHECK7-NEXT:    store i32 [[TMP20]], i32* [[L_CASTED]], align 4
3220 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[L_CASTED]], align 4
3221 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
3222 // CHECK7-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP16]] to i8*
3223 // CHECK7-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
3224 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
3225 // CHECK7-NEXT:    [[TMP25:%.*]] = inttoptr i32 [[TMP17]] to i8*
3226 // CHECK7-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
3227 // CHECK7-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
3228 // CHECK7-NEXT:    [[TMP27:%.*]] = inttoptr i32 [[TMP19]] to i8*
3229 // CHECK7-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 4
3230 // CHECK7-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
3231 // CHECK7-NEXT:    [[TMP29:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
3232 // CHECK7-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 4
3233 // CHECK7-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
3234 // CHECK7-NEXT:    [[TMP31:%.*]] = inttoptr i32 [[TMP21]] to i8*
3235 // CHECK7-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 4
3236 // CHECK7-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3237 // CHECK7-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
3238 // CHECK7-NEXT:    [[TMP34:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3239 // CHECK7-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP34]], i32 5)
3240 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3241 // CHECK7:       omp.inner.for.inc:
3242 // CHECK7-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3243 // CHECK7-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3244 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
3245 // CHECK7-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
3246 // CHECK7-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3247 // CHECK7-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3248 // CHECK7-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
3249 // CHECK7-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
3250 // CHECK7-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3251 // CHECK7-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3252 // CHECK7-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
3253 // CHECK7-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
3254 // CHECK7-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3255 // CHECK7-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3256 // CHECK7-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP41]], [[TMP42]]
3257 // CHECK7-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
3258 // CHECK7:       cond.true11:
3259 // CHECK7-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3260 // CHECK7-NEXT:    br label [[COND_END13:%.*]]
3261 // CHECK7:       cond.false12:
3262 // CHECK7-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3263 // CHECK7-NEXT:    br label [[COND_END13]]
3264 // CHECK7:       cond.end13:
3265 // CHECK7-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP43]], [[COND_TRUE11]] ], [ [[TMP44]], [[COND_FALSE12]] ]
3266 // CHECK7-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
3267 // CHECK7-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3268 // CHECK7-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV]], align 4
3269 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
3270 // CHECK7:       omp.inner.for.end:
3271 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3272 // CHECK7:       omp.loop.exit:
3273 // CHECK7-NEXT:    [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3274 // CHECK7-NEXT:    [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
3275 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP47]])
3276 // CHECK7-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3277 // CHECK7-NEXT:    [[TMP49:%.*]] = icmp ne i32 [[TMP48]], 0
3278 // CHECK7-NEXT:    br i1 [[TMP49]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
3279 // CHECK7:       .omp.lastprivate.then:
3280 // CHECK7-NEXT:    [[TMP50:%.*]] = load i32, i32* [[L_ADDR]], align 4
3281 // CHECK7-NEXT:    store i32 [[TMP50]], i32* [[L_ADDR]], align 4
3282 // CHECK7-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
3283 // CHECK7:       .omp.lastprivate.done:
3284 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
3285 // CHECK7:       omp.precond.end:
3286 // CHECK7-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
3287 // CHECK7-NEXT:    ret void
3288 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__1
3289 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
3290 // CHECK7-NEXT:  entry:
3291 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3292 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3293 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3294 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3295 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3296 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
3297 // CHECK7-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
3298 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3299 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3300 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3301 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3302 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3303 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3304 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3305 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3306 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3307 // CHECK7-NEXT:    [[I3:%.*]] = alloca i32, align 4
3308 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3309 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3310 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3311 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3312 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3313 // CHECK7-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
3314 // CHECK7-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
3315 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
3316 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
3317 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
3318 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3319 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
3320 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3321 // CHECK7-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3322 // CHECK7-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3323 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
3324 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3325 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
3326 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3327 // CHECK7:       omp.precond.then:
3328 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3329 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3330 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
3331 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3332 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3333 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
3334 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
3335 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3336 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3337 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3338 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
3339 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
3340 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
3341 // CHECK7:       omp.dispatch.cond:
3342 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3343 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3344 // CHECK7-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
3345 // CHECK7-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3346 // CHECK7:       cond.true:
3347 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3348 // CHECK7-NEXT:    br label [[COND_END:%.*]]
3349 // CHECK7:       cond.false:
3350 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3351 // CHECK7-NEXT:    br label [[COND_END]]
3352 // CHECK7:       cond.end:
3353 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
3354 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3355 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3356 // CHECK7-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
3357 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3358 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3359 // CHECK7-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
3360 // CHECK7-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3361 // CHECK7:       omp.dispatch.body:
3362 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3363 // CHECK7:       omp.inner.for.cond:
3364 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3365 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3366 // CHECK7-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
3367 // CHECK7-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3368 // CHECK7:       omp.inner.for.body:
3369 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3370 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
3371 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3372 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
3373 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
3374 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
3375 // CHECK7-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
3376 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
3377 // CHECK7-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
3378 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3379 // CHECK7:       omp.body.continue:
3380 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3381 // CHECK7:       omp.inner.for.inc:
3382 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3383 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
3384 // CHECK7-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
3385 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
3386 // CHECK7:       omp.inner.for.end:
3387 // CHECK7-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
3388 // CHECK7:       omp.dispatch.inc:
3389 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3390 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3391 // CHECK7-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
3392 // CHECK7-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
3393 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3394 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3395 // CHECK7-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
3396 // CHECK7-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
3397 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND]]
3398 // CHECK7:       omp.dispatch.end:
3399 // CHECK7-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3400 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
3401 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
3402 // CHECK7-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3403 // CHECK7-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
3404 // CHECK7-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
3405 // CHECK7:       .omp.lastprivate.then:
3406 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
3407 // CHECK7-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
3408 // CHECK7-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
3409 // CHECK7:       .omp.lastprivate.done:
3410 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
3411 // CHECK7:       omp.precond.end:
3412 // CHECK7-NEXT:    ret void
3413 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
3414 // CHECK7-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
3415 // CHECK7-NEXT:  entry:
3416 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3417 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
3418 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
3419 // CHECK7-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3420 // CHECK7-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3421 // CHECK7-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
3422 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3423 // CHECK7-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
3424 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
3425 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
3426 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
3427 // CHECK7-NEXT:    br label [[DOTEXECUTE:%.*]]
3428 // CHECK7:       .execute:
3429 // CHECK7-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
3430 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
3431 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
3432 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
3433 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3434 // CHECK7-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
3435 // CHECK7-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
3436 // CHECK7:       .omp.deinit:
3437 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
3438 // CHECK7-NEXT:    br label [[DOTEXIT:%.*]]
3439 // CHECK7:       .exit:
3440 // CHECK7-NEXT:    ret void
3441 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__2
3442 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
3443 // CHECK7-NEXT:  entry:
3444 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3445 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3446 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3447 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
3448 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3449 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3450 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3451 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3452 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3453 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3454 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3455 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3456 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3457 // CHECK7-NEXT:    [[I3:%.*]] = alloca i32, align 4
3458 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
3459 // CHECK7-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
3460 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3461 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3462 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3463 // CHECK7-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
3464 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
3465 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
3466 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
3467 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3468 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
3469 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3470 // CHECK7-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3471 // CHECK7-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3472 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
3473 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3474 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
3475 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3476 // CHECK7:       omp.precond.then:
3477 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3478 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3479 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
3480 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3481 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3482 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
3483 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3484 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
3485 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
3486 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3487 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3488 // CHECK7-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
3489 // CHECK7-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3490 // CHECK7:       cond.true:
3491 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3492 // CHECK7-NEXT:    br label [[COND_END:%.*]]
3493 // CHECK7:       cond.false:
3494 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3495 // CHECK7-NEXT:    br label [[COND_END]]
3496 // CHECK7:       cond.end:
3497 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
3498 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3499 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3500 // CHECK7-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
3501 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3502 // CHECK7:       omp.inner.for.cond:
3503 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3504 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3505 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
3506 // CHECK7-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
3507 // CHECK7-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3508 // CHECK7:       omp.inner.for.body:
3509 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3510 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3511 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
3512 // CHECK7-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
3513 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
3514 // CHECK7-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
3515 // CHECK7-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
3516 // CHECK7-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
3517 // CHECK7-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
3518 // CHECK7-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
3519 // CHECK7-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
3520 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
3521 // CHECK7-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
3522 // CHECK7-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
3523 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
3524 // CHECK7-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
3525 // CHECK7-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
3526 // CHECK7-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3527 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
3528 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3529 // CHECK7-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
3530 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3531 // CHECK7:       omp.inner.for.inc:
3532 // CHECK7-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3533 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3534 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
3535 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
3536 // CHECK7-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3537 // CHECK7-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3538 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
3539 // CHECK7-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
3540 // CHECK7-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3541 // CHECK7-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3542 // CHECK7-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
3543 // CHECK7-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
3544 // CHECK7-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3545 // CHECK7-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3546 // CHECK7-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
3547 // CHECK7-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
3548 // CHECK7:       cond.true10:
3549 // CHECK7-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3550 // CHECK7-NEXT:    br label [[COND_END12:%.*]]
3551 // CHECK7:       cond.false11:
3552 // CHECK7-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3553 // CHECK7-NEXT:    br label [[COND_END12]]
3554 // CHECK7:       cond.end12:
3555 // CHECK7-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
3556 // CHECK7-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
3557 // CHECK7-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3558 // CHECK7-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
3559 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
3560 // CHECK7:       omp.inner.for.end:
3561 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3562 // CHECK7:       omp.loop.exit:
3563 // CHECK7-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3564 // CHECK7-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
3565 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
3566 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
3567 // CHECK7:       omp.precond.end:
3568 // CHECK7-NEXT:    ret void
3569 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__3
3570 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
3571 // CHECK7-NEXT:  entry:
3572 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3573 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3574 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3575 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3576 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3577 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
3578 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3579 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3580 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3581 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3582 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3583 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3584 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3585 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3586 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3587 // CHECK7-NEXT:    [[I3:%.*]] = alloca i32, align 4
3588 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3589 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3590 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3591 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3592 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3593 // CHECK7-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
3594 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
3595 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
3596 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
3597 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3598 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
3599 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3600 // CHECK7-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3601 // CHECK7-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3602 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
3603 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3604 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
3605 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3606 // CHECK7:       omp.precond.then:
3607 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3608 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3609 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
3610 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3611 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3612 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
3613 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
3614 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3615 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3616 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3617 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
3618 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3619 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3620 // CHECK7-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
3621 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3622 // CHECK7:       omp.inner.for.cond:
3623 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3624 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3625 // CHECK7-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
3626 // CHECK7-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3627 // CHECK7:       omp.inner.for.body:
3628 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3629 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
3630 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3631 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
3632 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
3633 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
3634 // CHECK7-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
3635 // CHECK7-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
3636 // CHECK7-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
3637 // CHECK7-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
3638 // CHECK7-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
3639 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3640 // CHECK7:       omp.body.continue:
3641 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3642 // CHECK7:       omp.inner.for.inc:
3643 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3644 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3645 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
3646 // CHECK7-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
3647 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
3648 // CHECK7:       omp.inner.for.end:
3649 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3650 // CHECK7:       omp.loop.exit:
3651 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3652 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
3653 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
3654 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
3655 // CHECK7:       omp.precond.end:
3656 // CHECK7-NEXT:    ret void
3657 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
3658 // CHECK7-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
3659 // CHECK7-NEXT:  entry:
3660 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
3661 // CHECK7-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3662 // CHECK7-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3663 // CHECK7-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
3664 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
3665 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
3666 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
3667 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
3668 // CHECK7-NEXT:    br label [[DOTEXECUTE:%.*]]
3669 // CHECK7:       .execute:
3670 // CHECK7-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
3671 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3672 // CHECK7-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
3673 // CHECK7-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
3674 // CHECK7:       .omp.deinit:
3675 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
3676 // CHECK7-NEXT:    br label [[DOTEXIT:%.*]]
3677 // CHECK7:       .exit:
3678 // CHECK7-NEXT:    ret void
3679 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__4
3680 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
3681 // CHECK7-NEXT:  entry:
3682 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3683 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3684 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
3685 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3686 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3687 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3688 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3689 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3690 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3691 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3692 // CHECK7-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
3693 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3694 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3695 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
3696 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
3697 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3698 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
3699 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3700 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3701 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
3702 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3703 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3704 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
3705 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3706 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
3707 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3708 // CHECK7:       cond.true:
3709 // CHECK7-NEXT:    br label [[COND_END:%.*]]
3710 // CHECK7:       cond.false:
3711 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3712 // CHECK7-NEXT:    br label [[COND_END]]
3713 // CHECK7:       cond.end:
3714 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3715 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3716 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3717 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3718 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3719 // CHECK7:       omp.inner.for.cond:
3720 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3721 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
3722 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3723 // CHECK7:       omp.inner.for.body:
3724 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3725 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3726 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
3727 // CHECK7-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
3728 // CHECK7-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
3729 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
3730 // CHECK7-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
3731 // CHECK7-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
3732 // CHECK7-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
3733 // CHECK7-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
3734 // CHECK7-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
3735 // CHECK7-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3736 // CHECK7-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
3737 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3738 // CHECK7:       omp.inner.for.inc:
3739 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3740 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3741 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
3742 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
3743 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3744 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3745 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
3746 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
3747 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3748 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3749 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
3750 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
3751 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3752 // CHECK7-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
3753 // CHECK7-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
3754 // CHECK7:       cond.true5:
3755 // CHECK7-NEXT:    br label [[COND_END7:%.*]]
3756 // CHECK7:       cond.false6:
3757 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3758 // CHECK7-NEXT:    br label [[COND_END7]]
3759 // CHECK7:       cond.end7:
3760 // CHECK7-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
3761 // CHECK7-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
3762 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3763 // CHECK7-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
3764 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
3765 // CHECK7:       omp.inner.for.end:
3766 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3767 // CHECK7:       omp.loop.exit:
3768 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3769 // CHECK7-NEXT:    ret void
3770 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__5
3771 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
3772 // CHECK7-NEXT:  entry:
3773 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3774 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3775 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3776 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3777 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
3778 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3779 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3780 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3781 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3782 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3783 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3784 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3785 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3786 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3787 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3788 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3789 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
3790 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
3791 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3792 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
3793 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3794 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3795 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
3796 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
3797 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3798 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3799 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3800 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
3801 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3802 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3803 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3804 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3805 // CHECK7:       omp.inner.for.cond:
3806 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3807 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3808 // CHECK7-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
3809 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3810 // CHECK7:       omp.inner.for.body:
3811 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3812 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
3813 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3814 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
3815 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
3816 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
3817 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
3818 // CHECK7-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
3819 // CHECK7-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
3820 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3821 // CHECK7:       omp.body.continue:
3822 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3823 // CHECK7:       omp.inner.for.inc:
3824 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3825 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3826 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
3827 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
3828 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
3829 // CHECK7:       omp.inner.for.end:
3830 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3831 // CHECK7:       omp.loop.exit:
3832 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
3833 // CHECK7-NEXT:    ret void
3834 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
3835 // CHECK7-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
3836 // CHECK7-NEXT:  entry:
3837 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
3838 // CHECK7-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
3839 // CHECK7-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
3840 // CHECK7-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3841 // CHECK7-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3842 // CHECK7-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
3843 // CHECK7-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
3844 // CHECK7-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
3845 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
3846 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
3847 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
3848 // CHECK7-NEXT:    br label [[DOTEXECUTE:%.*]]
3849 // CHECK7:       .execute:
3850 // CHECK7-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
3851 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
3852 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
3853 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
3854 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3855 // CHECK7-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
3856 // CHECK7-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
3857 // CHECK7:       .omp.deinit:
3858 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
3859 // CHECK7-NEXT:    br label [[DOTEXIT:%.*]]
3860 // CHECK7:       .exit:
3861 // CHECK7-NEXT:    ret void
3862 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__6
3863 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
3864 // CHECK7-NEXT:  entry:
3865 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3866 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3867 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
3868 // CHECK7-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
3869 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3870 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3871 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
3872 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3873 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3874 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3875 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3876 // CHECK7-NEXT:    [[K:%.*]] = alloca i32, align 4
3877 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3878 // CHECK7-NEXT:    [[J:%.*]] = alloca i32, align 4
3879 // CHECK7-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
3880 // CHECK7-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
3881 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3882 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3883 // CHECK7-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
3884 // CHECK7-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
3885 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
3886 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3887 // CHECK7-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
3888 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3889 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3890 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
3891 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3892 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3893 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
3894 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3895 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
3896 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3897 // CHECK7:       cond.true:
3898 // CHECK7-NEXT:    br label [[COND_END:%.*]]
3899 // CHECK7:       cond.false:
3900 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3901 // CHECK7-NEXT:    br label [[COND_END]]
3902 // CHECK7:       cond.end:
3903 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3904 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3905 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3906 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3907 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3908 // CHECK7:       omp.inner.for.cond:
3909 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3910 // CHECK7-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
3911 // CHECK7-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3912 // CHECK7:       omp.inner.for.body:
3913 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3914 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3915 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
3916 // CHECK7-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
3917 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
3918 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
3919 // CHECK7-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
3920 // CHECK7-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
3921 // CHECK7-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
3922 // CHECK7-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
3923 // CHECK7-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
3924 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
3925 // CHECK7-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
3926 // CHECK7-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
3927 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
3928 // CHECK7-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
3929 // CHECK7-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
3930 // CHECK7-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3931 // CHECK7-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
3932 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3933 // CHECK7:       omp.inner.for.inc:
3934 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3935 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3936 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
3937 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
3938 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3939 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3940 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
3941 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
3942 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3943 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3944 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
3945 // CHECK7-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
3946 // CHECK7-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3947 // CHECK7-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
3948 // CHECK7-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
3949 // CHECK7:       cond.true6:
3950 // CHECK7-NEXT:    br label [[COND_END8:%.*]]
3951 // CHECK7:       cond.false7:
3952 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3953 // CHECK7-NEXT:    br label [[COND_END8]]
3954 // CHECK7:       cond.end8:
3955 // CHECK7-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
3956 // CHECK7-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
3957 // CHECK7-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3958 // CHECK7-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
3959 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
3960 // CHECK7:       omp.inner.for.end:
3961 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3962 // CHECK7:       omp.loop.exit:
3963 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3964 // CHECK7-NEXT:    ret void
3965 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__7
3966 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
3967 // CHECK7-NEXT:  entry:
3968 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3969 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3970 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3971 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3972 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
3973 // CHECK7-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
3974 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3975 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3976 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
3977 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3978 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3979 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3980 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3981 // CHECK7-NEXT:    [[K:%.*]] = alloca i32, align 4
3982 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3983 // CHECK7-NEXT:    [[J:%.*]] = alloca i32, align 4
3984 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3985 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3986 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3987 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3988 // CHECK7-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
3989 // CHECK7-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
3990 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
3991 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3992 // CHECK7-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
3993 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3994 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3995 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
3996 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
3997 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3998 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3999 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4000 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
4001 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4002 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4003 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
4004 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4005 // CHECK7:       omp.inner.for.cond:
4006 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4007 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4008 // CHECK7-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
4009 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4010 // CHECK7:       omp.inner.for.body:
4011 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4012 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
4013 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
4014 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4015 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
4016 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4017 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4018 // CHECK7-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
4019 // CHECK7-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
4020 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
4021 // CHECK7-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
4022 // CHECK7-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
4023 // CHECK7-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
4024 // CHECK7-NEXT:    store i32 10, i32* [[K]], align 4
4025 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
4026 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
4027 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
4028 // CHECK7-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
4029 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
4030 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
4031 // CHECK7-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
4032 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
4033 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
4034 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
4035 // CHECK7-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
4036 // CHECK7-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
4037 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4038 // CHECK7:       omp.body.continue:
4039 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4040 // CHECK7:       omp.inner.for.inc:
4041 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4042 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4043 // CHECK7-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
4044 // CHECK7-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
4045 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
4046 // CHECK7:       omp.inner.for.end:
4047 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4048 // CHECK7:       omp.loop.exit:
4049 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
4050 // CHECK7-NEXT:    ret void
4051 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
4052 // CHECK7-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
4053 // CHECK7-NEXT:  entry:
4054 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4055 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
4056 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4057 // CHECK7-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4058 // CHECK7-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4059 // CHECK7-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
4060 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4061 // CHECK7-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
4062 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
4063 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
4064 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
4065 // CHECK7-NEXT:    br label [[DOTEXECUTE:%.*]]
4066 // CHECK7:       .execute:
4067 // CHECK7-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
4068 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4069 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
4070 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
4071 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
4072 // CHECK7-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
4073 // CHECK7-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
4074 // CHECK7:       .omp.deinit:
4075 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
4076 // CHECK7-NEXT:    br label [[DOTEXIT:%.*]]
4077 // CHECK7:       .exit:
4078 // CHECK7-NEXT:    ret void
4079 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__8
4080 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
4081 // CHECK7-NEXT:  entry:
4082 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4083 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4084 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4085 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
4086 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
4087 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4088 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4089 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4090 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4091 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
4092 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
4093 // CHECK7-NEXT:    [[J:%.*]] = alloca i32, align 4
4094 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
4095 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
4096 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
4097 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4098 // CHECK7-NEXT:    [[I9:%.*]] = alloca i32, align 4
4099 // CHECK7-NEXT:    [[J10:%.*]] = alloca i32, align 4
4100 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4101 // CHECK7-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
4102 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4103 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4104 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4105 // CHECK7-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
4106 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
4107 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
4108 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
4109 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4110 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4111 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4112 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
4113 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4114 // CHECK7-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
4115 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4116 // CHECK7-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
4117 // CHECK7-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
4118 // CHECK7-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
4119 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
4120 // CHECK7-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
4121 // CHECK7-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
4122 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
4123 // CHECK7-NEXT:    store i32 0, i32* [[J]], align 4
4124 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4125 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
4126 // CHECK7-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
4127 // CHECK7:       land.lhs.true:
4128 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4129 // CHECK7-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
4130 // CHECK7-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
4131 // CHECK7:       omp.precond.then:
4132 // CHECK7-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
4133 // CHECK7-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
4134 // CHECK7-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
4135 // CHECK7-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
4136 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4137 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
4138 // CHECK7-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
4139 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4140 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4141 // CHECK7-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
4142 // CHECK7-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
4143 // CHECK7-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
4144 // CHECK7-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
4145 // CHECK7-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4146 // CHECK7:       cond.true:
4147 // CHECK7-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
4148 // CHECK7-NEXT:    br label [[COND_END:%.*]]
4149 // CHECK7:       cond.false:
4150 // CHECK7-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
4151 // CHECK7-NEXT:    br label [[COND_END]]
4152 // CHECK7:       cond.end:
4153 // CHECK7-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
4154 // CHECK7-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
4155 // CHECK7-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
4156 // CHECK7-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
4157 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4158 // CHECK7:       omp.inner.for.cond:
4159 // CHECK7-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
4160 // CHECK7-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
4161 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
4162 // CHECK7-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
4163 // CHECK7-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4164 // CHECK7:       omp.inner.for.body:
4165 // CHECK7-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
4166 // CHECK7-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
4167 // CHECK7-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
4168 // CHECK7-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
4169 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
4170 // CHECK7-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
4171 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
4172 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
4173 // CHECK7-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
4174 // CHECK7-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
4175 // CHECK7-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
4176 // CHECK7-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
4177 // CHECK7-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
4178 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
4179 // CHECK7-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
4180 // CHECK7-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
4181 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
4182 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
4183 // CHECK7-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
4184 // CHECK7-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4185 // CHECK7-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
4186 // CHECK7-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
4187 // CHECK7-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
4188 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4189 // CHECK7:       omp.inner.for.inc:
4190 // CHECK7-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
4191 // CHECK7-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
4192 // CHECK7-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
4193 // CHECK7-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
4194 // CHECK7-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
4195 // CHECK7-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
4196 // CHECK7-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
4197 // CHECK7-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
4198 // CHECK7-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
4199 // CHECK7-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
4200 // CHECK7-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
4201 // CHECK7-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
4202 // CHECK7-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
4203 // CHECK7-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
4204 // CHECK7-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
4205 // CHECK7-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
4206 // CHECK7:       cond.true18:
4207 // CHECK7-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
4208 // CHECK7-NEXT:    br label [[COND_END20:%.*]]
4209 // CHECK7:       cond.false19:
4210 // CHECK7-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
4211 // CHECK7-NEXT:    br label [[COND_END20]]
4212 // CHECK7:       cond.end20:
4213 // CHECK7-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
4214 // CHECK7-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
4215 // CHECK7-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
4216 // CHECK7-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
4217 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
4218 // CHECK7:       omp.inner.for.end:
4219 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4220 // CHECK7:       omp.loop.exit:
4221 // CHECK7-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4222 // CHECK7-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
4223 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
4224 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
4225 // CHECK7:       omp.precond.end:
4226 // CHECK7-NEXT:    ret void
4227 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__9
4228 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
4229 // CHECK7-NEXT:  entry:
4230 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4231 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4232 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4233 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4234 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4235 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
4236 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
4237 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4238 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4239 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4240 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4241 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
4242 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
4243 // CHECK7-NEXT:    [[J:%.*]] = alloca i32, align 4
4244 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
4245 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
4246 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
4247 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4248 // CHECK7-NEXT:    [[I11:%.*]] = alloca i32, align 4
4249 // CHECK7-NEXT:    [[J12:%.*]] = alloca i32, align 4
4250 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4251 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4252 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4253 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4254 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4255 // CHECK7-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
4256 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
4257 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
4258 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
4259 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4260 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4261 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4262 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
4263 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4264 // CHECK7-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
4265 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4266 // CHECK7-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
4267 // CHECK7-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
4268 // CHECK7-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
4269 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
4270 // CHECK7-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
4271 // CHECK7-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
4272 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
4273 // CHECK7-NEXT:    store i32 0, i32* [[J]], align 4
4274 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4275 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
4276 // CHECK7-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
4277 // CHECK7:       land.lhs.true:
4278 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4279 // CHECK7-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
4280 // CHECK7-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
4281 // CHECK7:       omp.precond.then:
4282 // CHECK7-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
4283 // CHECK7-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
4284 // CHECK7-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
4285 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4286 // CHECK7-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
4287 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4288 // CHECK7-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
4289 // CHECK7-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
4290 // CHECK7-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
4291 // CHECK7-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
4292 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4293 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4294 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
4295 // CHECK7-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
4296 // CHECK7-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
4297 // CHECK7-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
4298 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4299 // CHECK7:       omp.inner.for.cond:
4300 // CHECK7-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
4301 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4302 // CHECK7-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
4303 // CHECK7-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
4304 // CHECK7-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4305 // CHECK7:       omp.inner.for.body:
4306 // CHECK7-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
4307 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4308 // CHECK7-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
4309 // CHECK7-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
4310 // CHECK7-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
4311 // CHECK7-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
4312 // CHECK7-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
4313 // CHECK7-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
4314 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
4315 // CHECK7-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
4316 // CHECK7-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
4317 // CHECK7-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
4318 // CHECK7-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
4319 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4320 // CHECK7-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
4321 // CHECK7-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
4322 // CHECK7-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
4323 // CHECK7-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
4324 // CHECK7-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
4325 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4326 // CHECK7-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
4327 // CHECK7-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
4328 // CHECK7-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
4329 // CHECK7-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
4330 // CHECK7-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
4331 // CHECK7-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
4332 // CHECK7-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
4333 // CHECK7-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
4334 // CHECK7-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
4335 // CHECK7-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
4336 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
4337 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
4338 // CHECK7-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
4339 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
4340 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
4341 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
4342 // CHECK7-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
4343 // CHECK7-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
4344 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4345 // CHECK7:       omp.body.continue:
4346 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4347 // CHECK7:       omp.inner.for.inc:
4348 // CHECK7-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
4349 // CHECK7-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
4350 // CHECK7-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
4351 // CHECK7-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
4352 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
4353 // CHECK7:       omp.inner.for.end:
4354 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4355 // CHECK7:       omp.loop.exit:
4356 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4357 // CHECK7-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
4358 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
4359 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
4360 // CHECK7:       omp.precond.end:
4361 // CHECK7-NEXT:    ret void
4362 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
4363 // CHECK7-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
4364 // CHECK7-NEXT:  entry:
4365 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4366 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
4367 // CHECK7-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
4368 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4369 // CHECK7-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4370 // CHECK7-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4371 // CHECK7-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
4372 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4373 // CHECK7-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
4374 // CHECK7-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
4375 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
4376 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
4377 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
4378 // CHECK7-NEXT:    br label [[DOTEXECUTE:%.*]]
4379 // CHECK7:       .execute:
4380 // CHECK7-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
4381 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4382 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
4383 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
4384 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
4385 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
4386 // CHECK7-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
4387 // CHECK7-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
4388 // CHECK7:       .omp.deinit:
4389 // CHECK7-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
4390 // CHECK7-NEXT:    br label [[DOTEXIT:%.*]]
4391 // CHECK7:       .exit:
4392 // CHECK7-NEXT:    ret void
4393 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__10
4394 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
4395 // CHECK7-NEXT:  entry:
4396 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4397 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4398 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4399 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
4400 // CHECK7-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
4401 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4402 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4403 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4404 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4405 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
4406 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4407 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4408 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4409 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4410 // CHECK7-NEXT:    [[I3:%.*]] = alloca i32, align 4
4411 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4412 // CHECK7-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
4413 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4414 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4415 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4416 // CHECK7-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
4417 // CHECK7-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
4418 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
4419 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
4420 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
4421 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4422 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
4423 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4424 // CHECK7-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4425 // CHECK7-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4426 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
4427 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4428 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
4429 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4430 // CHECK7:       omp.precond.then:
4431 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4432 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4433 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
4434 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4435 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4436 // CHECK7-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
4437 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4438 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
4439 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
4440 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4441 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4442 // CHECK7-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
4443 // CHECK7-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4444 // CHECK7:       cond.true:
4445 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4446 // CHECK7-NEXT:    br label [[COND_END:%.*]]
4447 // CHECK7:       cond.false:
4448 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4449 // CHECK7-NEXT:    br label [[COND_END]]
4450 // CHECK7:       cond.end:
4451 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
4452 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4453 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4454 // CHECK7-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
4455 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4456 // CHECK7:       omp.inner.for.cond:
4457 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4458 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4459 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
4460 // CHECK7-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
4461 // CHECK7-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4462 // CHECK7:       omp.inner.for.body:
4463 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4464 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4465 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
4466 // CHECK7-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
4467 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
4468 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
4469 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
4470 // CHECK7-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
4471 // CHECK7-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
4472 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
4473 // CHECK7-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
4474 // CHECK7-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
4475 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
4476 // CHECK7-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
4477 // CHECK7-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
4478 // CHECK7-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
4479 // CHECK7-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
4480 // CHECK7-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
4481 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
4482 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
4483 // CHECK7-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
4484 // CHECK7-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4485 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
4486 // CHECK7-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
4487 // CHECK7-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
4488 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4489 // CHECK7:       omp.inner.for.inc:
4490 // CHECK7-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4491 // CHECK7-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4492 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
4493 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
4494 // CHECK7-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4495 // CHECK7-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4496 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
4497 // CHECK7-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
4498 // CHECK7-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4499 // CHECK7-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4500 // CHECK7-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
4501 // CHECK7-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
4502 // CHECK7-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4503 // CHECK7-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4504 // CHECK7-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
4505 // CHECK7-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
4506 // CHECK7:       cond.true10:
4507 // CHECK7-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4508 // CHECK7-NEXT:    br label [[COND_END12:%.*]]
4509 // CHECK7:       cond.false11:
4510 // CHECK7-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4511 // CHECK7-NEXT:    br label [[COND_END12]]
4512 // CHECK7:       cond.end12:
4513 // CHECK7-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
4514 // CHECK7-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
4515 // CHECK7-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4516 // CHECK7-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
4517 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
4518 // CHECK7:       omp.inner.for.end:
4519 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4520 // CHECK7:       omp.loop.exit:
4521 // CHECK7-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4522 // CHECK7-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
4523 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
4524 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
4525 // CHECK7:       omp.precond.end:
4526 // CHECK7-NEXT:    ret void
4527 // CHECK7-LABEL: define {{[^@]+}}@__omp_outlined__11
4528 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
4529 // CHECK7-NEXT:  entry:
4530 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4531 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4532 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4533 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4534 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4535 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
4536 // CHECK7-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
4537 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4538 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4539 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4540 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4541 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
4542 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4543 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4544 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4545 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4546 // CHECK7-NEXT:    [[I3:%.*]] = alloca i32, align 4
4547 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4548 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4549 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4550 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4551 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4552 // CHECK7-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
4553 // CHECK7-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
4554 // CHECK7-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
4555 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
4556 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
4557 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4558 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
4559 // CHECK7-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4560 // CHECK7-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4561 // CHECK7-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4562 // CHECK7-NEXT:    store i32 0, i32* [[I]], align 4
4563 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4564 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
4565 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4566 // CHECK7:       omp.precond.then:
4567 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4568 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4569 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
4570 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4571 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4572 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
4573 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
4574 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4575 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4576 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4577 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
4578 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4579 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4580 // CHECK7-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
4581 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4582 // CHECK7:       omp.inner.for.cond:
4583 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4584 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4585 // CHECK7-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
4586 // CHECK7-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4587 // CHECK7:       omp.inner.for.body:
4588 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4589 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
4590 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4591 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
4592 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
4593 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
4594 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
4595 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
4596 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
4597 // CHECK7-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
4598 // CHECK7-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
4599 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4600 // CHECK7:       omp.body.continue:
4601 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4602 // CHECK7:       omp.inner.for.inc:
4603 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4604 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4605 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
4606 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
4607 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]]
4608 // CHECK7:       omp.inner.for.end:
4609 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4610 // CHECK7:       omp.loop.exit:
4611 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4612 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
4613 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
4614 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
4615 // CHECK7:       omp.precond.end:
4616 // CHECK7-NEXT:    ret void
4617 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
4618 // CHECK8-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
4619 // CHECK8-NEXT:  entry:
4620 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4621 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
4622 // CHECK8-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
4623 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4624 // CHECK8-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
4625 // CHECK8-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4626 // CHECK8-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4627 // CHECK8-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
4628 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4629 // CHECK8-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
4630 // CHECK8-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
4631 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
4632 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
4633 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
4634 // CHECK8-NEXT:    br label [[DOTEXECUTE:%.*]]
4635 // CHECK8:       .execute:
4636 // CHECK8-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
4637 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4638 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
4639 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
4640 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
4641 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
4642 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
4643 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
4644 // CHECK8-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
4645 // CHECK8-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
4646 // CHECK8:       .omp.deinit:
4647 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
4648 // CHECK8-NEXT:    br label [[DOTEXIT:%.*]]
4649 // CHECK8:       .exit:
4650 // CHECK8-NEXT:    ret void
4651 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__
4652 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
4653 // CHECK8-NEXT:  entry:
4654 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4655 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4656 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4657 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
4658 // CHECK8-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
4659 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4660 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4661 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4662 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4663 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
4664 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4665 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4666 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4667 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4668 // CHECK8-NEXT:    [[I4:%.*]] = alloca i32, align 4
4669 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4670 // CHECK8-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
4671 // CHECK8-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
4672 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4673 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4674 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4675 // CHECK8-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
4676 // CHECK8-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
4677 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
4678 // CHECK8-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i32 4, i16 1)
4679 // CHECK8-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
4680 // CHECK8-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
4681 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
4682 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
4683 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4684 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
4685 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4686 // CHECK8-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4687 // CHECK8-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4688 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
4689 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4690 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
4691 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4692 // CHECK8:       omp.precond.then:
4693 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4694 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4695 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_COMB_UB]], align 4
4696 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4697 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4698 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4699 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
4700 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
4701 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4702 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4703 // CHECK8-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
4704 // CHECK8-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4705 // CHECK8:       cond.true:
4706 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4707 // CHECK8-NEXT:    br label [[COND_END:%.*]]
4708 // CHECK8:       cond.false:
4709 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4710 // CHECK8-NEXT:    br label [[COND_END]]
4711 // CHECK8:       cond.end:
4712 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
4713 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4714 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4715 // CHECK8-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
4716 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4717 // CHECK8:       omp.inner.for.cond:
4718 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4719 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4720 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
4721 // CHECK8-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
4722 // CHECK8-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4723 // CHECK8:       omp.inner.for.body:
4724 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4725 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4726 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
4727 // CHECK8-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4
4728 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
4729 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[L_ADDR]], align 4
4730 // CHECK8-NEXT:    store i32 [[TMP20]], i32* [[L_CASTED]], align 4
4731 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[L_CASTED]], align 4
4732 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
4733 // CHECK8-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP16]] to i8*
4734 // CHECK8-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
4735 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
4736 // CHECK8-NEXT:    [[TMP25:%.*]] = inttoptr i32 [[TMP17]] to i8*
4737 // CHECK8-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
4738 // CHECK8-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
4739 // CHECK8-NEXT:    [[TMP27:%.*]] = inttoptr i32 [[TMP19]] to i8*
4740 // CHECK8-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 4
4741 // CHECK8-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
4742 // CHECK8-NEXT:    [[TMP29:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
4743 // CHECK8-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 4
4744 // CHECK8-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
4745 // CHECK8-NEXT:    [[TMP31:%.*]] = inttoptr i32 [[TMP21]] to i8*
4746 // CHECK8-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 4
4747 // CHECK8-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4748 // CHECK8-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
4749 // CHECK8-NEXT:    [[TMP34:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
4750 // CHECK8-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP34]], i32 5)
4751 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4752 // CHECK8:       omp.inner.for.inc:
4753 // CHECK8-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4754 // CHECK8-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4755 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
4756 // CHECK8-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
4757 // CHECK8-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4758 // CHECK8-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4759 // CHECK8-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
4760 // CHECK8-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
4761 // CHECK8-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4762 // CHECK8-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4763 // CHECK8-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
4764 // CHECK8-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
4765 // CHECK8-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4766 // CHECK8-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4767 // CHECK8-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP41]], [[TMP42]]
4768 // CHECK8-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
4769 // CHECK8:       cond.true11:
4770 // CHECK8-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4771 // CHECK8-NEXT:    br label [[COND_END13:%.*]]
4772 // CHECK8:       cond.false12:
4773 // CHECK8-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4774 // CHECK8-NEXT:    br label [[COND_END13]]
4775 // CHECK8:       cond.end13:
4776 // CHECK8-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP43]], [[COND_TRUE11]] ], [ [[TMP44]], [[COND_FALSE12]] ]
4777 // CHECK8-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
4778 // CHECK8-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4779 // CHECK8-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV]], align 4
4780 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
4781 // CHECK8:       omp.inner.for.end:
4782 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4783 // CHECK8:       omp.loop.exit:
4784 // CHECK8-NEXT:    [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4785 // CHECK8-NEXT:    [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
4786 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP47]])
4787 // CHECK8-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4788 // CHECK8-NEXT:    [[TMP49:%.*]] = icmp ne i32 [[TMP48]], 0
4789 // CHECK8-NEXT:    br i1 [[TMP49]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
4790 // CHECK8:       .omp.lastprivate.then:
4791 // CHECK8-NEXT:    [[TMP50:%.*]] = load i32, i32* [[L_ADDR]], align 4
4792 // CHECK8-NEXT:    store i32 [[TMP50]], i32* [[L_ADDR]], align 4
4793 // CHECK8-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
4794 // CHECK8:       .omp.lastprivate.done:
4795 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
4796 // CHECK8:       omp.precond.end:
4797 // CHECK8-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
4798 // CHECK8-NEXT:    ret void
4799 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__1
4800 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
4801 // CHECK8-NEXT:  entry:
4802 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4803 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4804 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4805 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4806 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4807 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
4808 // CHECK8-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
4809 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4810 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4811 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4812 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4813 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
4814 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4815 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4816 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4817 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4818 // CHECK8-NEXT:    [[I3:%.*]] = alloca i32, align 4
4819 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4820 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4821 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4822 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4823 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4824 // CHECK8-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
4825 // CHECK8-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
4826 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
4827 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
4828 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
4829 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4830 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
4831 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4832 // CHECK8-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4833 // CHECK8-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4834 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
4835 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4836 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
4837 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4838 // CHECK8:       omp.precond.then:
4839 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4840 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4841 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
4842 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4843 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4844 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
4845 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
4846 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4847 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4848 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4849 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
4850 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
4851 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4852 // CHECK8:       omp.dispatch.cond:
4853 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4854 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4855 // CHECK8-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
4856 // CHECK8-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4857 // CHECK8:       cond.true:
4858 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4859 // CHECK8-NEXT:    br label [[COND_END:%.*]]
4860 // CHECK8:       cond.false:
4861 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4862 // CHECK8-NEXT:    br label [[COND_END]]
4863 // CHECK8:       cond.end:
4864 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
4865 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4866 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4867 // CHECK8-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
4868 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4869 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4870 // CHECK8-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
4871 // CHECK8-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4872 // CHECK8:       omp.dispatch.body:
4873 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4874 // CHECK8:       omp.inner.for.cond:
4875 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4876 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4877 // CHECK8-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
4878 // CHECK8-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4879 // CHECK8:       omp.inner.for.body:
4880 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4881 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
4882 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4883 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
4884 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
4885 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
4886 // CHECK8-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
4887 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
4888 // CHECK8-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
4889 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4890 // CHECK8:       omp.body.continue:
4891 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4892 // CHECK8:       omp.inner.for.inc:
4893 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4894 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
4895 // CHECK8-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
4896 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
4897 // CHECK8:       omp.inner.for.end:
4898 // CHECK8-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4899 // CHECK8:       omp.dispatch.inc:
4900 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4901 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4902 // CHECK8-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
4903 // CHECK8-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
4904 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4905 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4906 // CHECK8-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
4907 // CHECK8-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
4908 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND]]
4909 // CHECK8:       omp.dispatch.end:
4910 // CHECK8-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4911 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
4912 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
4913 // CHECK8-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4914 // CHECK8-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
4915 // CHECK8-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
4916 // CHECK8:       .omp.lastprivate.then:
4917 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
4918 // CHECK8-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
4919 // CHECK8-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
4920 // CHECK8:       .omp.lastprivate.done:
4921 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
4922 // CHECK8:       omp.precond.end:
4923 // CHECK8-NEXT:    ret void
4924 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
4925 // CHECK8-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
4926 // CHECK8-NEXT:  entry:
4927 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4928 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
4929 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4930 // CHECK8-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4931 // CHECK8-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4932 // CHECK8-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
4933 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4934 // CHECK8-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
4935 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
4936 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
4937 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
4938 // CHECK8-NEXT:    br label [[DOTEXECUTE:%.*]]
4939 // CHECK8:       .execute:
4940 // CHECK8-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
4941 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4942 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
4943 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
4944 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
4945 // CHECK8-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
4946 // CHECK8-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
4947 // CHECK8:       .omp.deinit:
4948 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
4949 // CHECK8-NEXT:    br label [[DOTEXIT:%.*]]
4950 // CHECK8:       .exit:
4951 // CHECK8-NEXT:    ret void
4952 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__2
4953 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
4954 // CHECK8-NEXT:  entry:
4955 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4956 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4957 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4958 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
4959 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4960 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4961 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4962 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4963 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
4964 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4965 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4966 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4967 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4968 // CHECK8-NEXT:    [[I3:%.*]] = alloca i32, align 4
4969 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4970 // CHECK8-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
4971 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4972 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4973 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4974 // CHECK8-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
4975 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
4976 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
4977 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
4978 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4979 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
4980 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4981 // CHECK8-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4982 // CHECK8-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4983 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
4984 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4985 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
4986 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4987 // CHECK8:       omp.precond.then:
4988 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4989 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4990 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
4991 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4992 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4993 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
4994 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4995 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
4996 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
4997 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4998 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4999 // CHECK8-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
5000 // CHECK8-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5001 // CHECK8:       cond.true:
5002 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5003 // CHECK8-NEXT:    br label [[COND_END:%.*]]
5004 // CHECK8:       cond.false:
5005 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5006 // CHECK8-NEXT:    br label [[COND_END]]
5007 // CHECK8:       cond.end:
5008 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
5009 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5010 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5011 // CHECK8-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
5012 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5013 // CHECK8:       omp.inner.for.cond:
5014 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5015 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5016 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
5017 // CHECK8-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
5018 // CHECK8-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5019 // CHECK8:       omp.inner.for.body:
5020 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5021 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5022 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
5023 // CHECK8-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
5024 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
5025 // CHECK8-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
5026 // CHECK8-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
5027 // CHECK8-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
5028 // CHECK8-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
5029 // CHECK8-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
5030 // CHECK8-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
5031 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
5032 // CHECK8-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
5033 // CHECK8-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
5034 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
5035 // CHECK8-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
5036 // CHECK8-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
5037 // CHECK8-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5038 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
5039 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
5040 // CHECK8-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
5041 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5042 // CHECK8:       omp.inner.for.inc:
5043 // CHECK8-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5044 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5045 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
5046 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
5047 // CHECK8-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5048 // CHECK8-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5049 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
5050 // CHECK8-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
5051 // CHECK8-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5052 // CHECK8-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5053 // CHECK8-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
5054 // CHECK8-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
5055 // CHECK8-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5056 // CHECK8-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5057 // CHECK8-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
5058 // CHECK8-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
5059 // CHECK8:       cond.true10:
5060 // CHECK8-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5061 // CHECK8-NEXT:    br label [[COND_END12:%.*]]
5062 // CHECK8:       cond.false11:
5063 // CHECK8-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5064 // CHECK8-NEXT:    br label [[COND_END12]]
5065 // CHECK8:       cond.end12:
5066 // CHECK8-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
5067 // CHECK8-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
5068 // CHECK8-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5069 // CHECK8-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
5070 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5071 // CHECK8:       omp.inner.for.end:
5072 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5073 // CHECK8:       omp.loop.exit:
5074 // CHECK8-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5075 // CHECK8-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
5076 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
5077 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
5078 // CHECK8:       omp.precond.end:
5079 // CHECK8-NEXT:    ret void
5080 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__3
5081 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
5082 // CHECK8-NEXT:  entry:
5083 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5084 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5085 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5086 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5087 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5088 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
5089 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5090 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5091 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5092 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5093 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5094 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5095 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5096 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5097 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5098 // CHECK8-NEXT:    [[I3:%.*]] = alloca i32, align 4
5099 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5100 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5101 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5102 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5103 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5104 // CHECK8-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
5105 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
5106 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
5107 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
5108 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5109 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
5110 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5111 // CHECK8-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5112 // CHECK8-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5113 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
5114 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5115 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
5116 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5117 // CHECK8:       omp.precond.then:
5118 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5119 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5120 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
5121 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5122 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5123 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
5124 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
5125 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5126 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5127 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5128 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
5129 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5130 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5131 // CHECK8-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
5132 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5133 // CHECK8:       omp.inner.for.cond:
5134 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5135 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5136 // CHECK8-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
5137 // CHECK8-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5138 // CHECK8:       omp.inner.for.body:
5139 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5140 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
5141 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5142 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
5143 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
5144 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
5145 // CHECK8-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
5146 // CHECK8-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
5147 // CHECK8-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
5148 // CHECK8-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
5149 // CHECK8-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
5150 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5151 // CHECK8:       omp.body.continue:
5152 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5153 // CHECK8:       omp.inner.for.inc:
5154 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5155 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5156 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
5157 // CHECK8-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
5158 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5159 // CHECK8:       omp.inner.for.end:
5160 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5161 // CHECK8:       omp.loop.exit:
5162 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5163 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
5164 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
5165 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
5166 // CHECK8:       omp.precond.end:
5167 // CHECK8-NEXT:    ret void
5168 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
5169 // CHECK8-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
5170 // CHECK8-NEXT:  entry:
5171 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
5172 // CHECK8-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
5173 // CHECK8-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
5174 // CHECK8-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
5175 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
5176 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
5177 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5178 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
5179 // CHECK8-NEXT:    br label [[DOTEXECUTE:%.*]]
5180 // CHECK8:       .execute:
5181 // CHECK8-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
5182 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
5183 // CHECK8-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
5184 // CHECK8-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
5185 // CHECK8:       .omp.deinit:
5186 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
5187 // CHECK8-NEXT:    br label [[DOTEXIT:%.*]]
5188 // CHECK8:       .exit:
5189 // CHECK8-NEXT:    ret void
5190 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__4
5191 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
5192 // CHECK8-NEXT:  entry:
5193 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5194 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5195 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
5196 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5197 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5198 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5199 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5200 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5201 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5202 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5203 // CHECK8-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
5204 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5205 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5206 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
5207 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
5208 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5209 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
5210 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5211 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5212 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5213 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5214 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5215 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
5216 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5217 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
5218 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5219 // CHECK8:       cond.true:
5220 // CHECK8-NEXT:    br label [[COND_END:%.*]]
5221 // CHECK8:       cond.false:
5222 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5223 // CHECK8-NEXT:    br label [[COND_END]]
5224 // CHECK8:       cond.end:
5225 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5226 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5227 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5228 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5229 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5230 // CHECK8:       omp.inner.for.cond:
5231 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5232 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
5233 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5234 // CHECK8:       omp.inner.for.body:
5235 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5236 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5237 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
5238 // CHECK8-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
5239 // CHECK8-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
5240 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
5241 // CHECK8-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
5242 // CHECK8-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
5243 // CHECK8-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
5244 // CHECK8-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
5245 // CHECK8-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
5246 // CHECK8-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
5247 // CHECK8-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
5248 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5249 // CHECK8:       omp.inner.for.inc:
5250 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5251 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5252 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
5253 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
5254 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5255 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5256 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
5257 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
5258 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5259 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5260 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5261 // CHECK8-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
5262 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5263 // CHECK8-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
5264 // CHECK8-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
5265 // CHECK8:       cond.true5:
5266 // CHECK8-NEXT:    br label [[COND_END7:%.*]]
5267 // CHECK8:       cond.false6:
5268 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5269 // CHECK8-NEXT:    br label [[COND_END7]]
5270 // CHECK8:       cond.end7:
5271 // CHECK8-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
5272 // CHECK8-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
5273 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5274 // CHECK8-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
5275 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5276 // CHECK8:       omp.inner.for.end:
5277 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5278 // CHECK8:       omp.loop.exit:
5279 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5280 // CHECK8-NEXT:    ret void
5281 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__5
5282 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
5283 // CHECK8-NEXT:  entry:
5284 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5285 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5286 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5287 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5288 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
5289 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5290 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5291 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5292 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5293 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5294 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5295 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5296 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5297 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5298 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5299 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5300 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
5301 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
5302 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5303 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
5304 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5305 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5306 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
5307 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
5308 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5309 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5310 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5311 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
5312 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5313 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5314 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5315 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5316 // CHECK8:       omp.inner.for.cond:
5317 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5318 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5319 // CHECK8-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
5320 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5321 // CHECK8:       omp.inner.for.body:
5322 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5323 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
5324 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5325 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
5326 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
5327 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
5328 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
5329 // CHECK8-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
5330 // CHECK8-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
5331 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5332 // CHECK8:       omp.body.continue:
5333 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5334 // CHECK8:       omp.inner.for.inc:
5335 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5336 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5337 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
5338 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
5339 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5340 // CHECK8:       omp.inner.for.end:
5341 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5342 // CHECK8:       omp.loop.exit:
5343 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
5344 // CHECK8-NEXT:    ret void
5345 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
5346 // CHECK8-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
5347 // CHECK8-NEXT:  entry:
5348 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
5349 // CHECK8-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
5350 // CHECK8-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
5351 // CHECK8-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
5352 // CHECK8-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
5353 // CHECK8-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
5354 // CHECK8-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
5355 // CHECK8-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
5356 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
5357 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5358 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
5359 // CHECK8-NEXT:    br label [[DOTEXECUTE:%.*]]
5360 // CHECK8:       .execute:
5361 // CHECK8-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
5362 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
5363 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
5364 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
5365 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
5366 // CHECK8-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
5367 // CHECK8-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
5368 // CHECK8:       .omp.deinit:
5369 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
5370 // CHECK8-NEXT:    br label [[DOTEXIT:%.*]]
5371 // CHECK8:       .exit:
5372 // CHECK8-NEXT:    ret void
5373 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__6
5374 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
5375 // CHECK8-NEXT:  entry:
5376 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5377 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5378 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
5379 // CHECK8-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
5380 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5381 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5382 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5383 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5384 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5385 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5386 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5387 // CHECK8-NEXT:    [[K:%.*]] = alloca i32, align 4
5388 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5389 // CHECK8-NEXT:    [[J:%.*]] = alloca i32, align 4
5390 // CHECK8-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
5391 // CHECK8-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
5392 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5393 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5394 // CHECK8-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
5395 // CHECK8-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
5396 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
5397 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5398 // CHECK8-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
5399 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5400 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5401 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5402 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5403 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5404 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
5405 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5406 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
5407 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5408 // CHECK8:       cond.true:
5409 // CHECK8-NEXT:    br label [[COND_END:%.*]]
5410 // CHECK8:       cond.false:
5411 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5412 // CHECK8-NEXT:    br label [[COND_END]]
5413 // CHECK8:       cond.end:
5414 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5415 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5416 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5417 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5418 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5419 // CHECK8:       omp.inner.for.cond:
5420 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5421 // CHECK8-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
5422 // CHECK8-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5423 // CHECK8:       omp.inner.for.body:
5424 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5425 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5426 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
5427 // CHECK8-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
5428 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
5429 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
5430 // CHECK8-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
5431 // CHECK8-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
5432 // CHECK8-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
5433 // CHECK8-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
5434 // CHECK8-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
5435 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
5436 // CHECK8-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
5437 // CHECK8-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
5438 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
5439 // CHECK8-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
5440 // CHECK8-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
5441 // CHECK8-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
5442 // CHECK8-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
5443 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5444 // CHECK8:       omp.inner.for.inc:
5445 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5446 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5447 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5448 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
5449 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5450 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5451 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
5452 // CHECK8-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
5453 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5454 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5455 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
5456 // CHECK8-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
5457 // CHECK8-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5458 // CHECK8-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
5459 // CHECK8-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
5460 // CHECK8:       cond.true6:
5461 // CHECK8-NEXT:    br label [[COND_END8:%.*]]
5462 // CHECK8:       cond.false7:
5463 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5464 // CHECK8-NEXT:    br label [[COND_END8]]
5465 // CHECK8:       cond.end8:
5466 // CHECK8-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
5467 // CHECK8-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
5468 // CHECK8-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5469 // CHECK8-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
5470 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5471 // CHECK8:       omp.inner.for.end:
5472 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5473 // CHECK8:       omp.loop.exit:
5474 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5475 // CHECK8-NEXT:    ret void
5476 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__7
5477 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
5478 // CHECK8-NEXT:  entry:
5479 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5480 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5481 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5482 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5483 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
5484 // CHECK8-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
5485 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5486 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5487 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5488 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5489 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5490 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5491 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5492 // CHECK8-NEXT:    [[K:%.*]] = alloca i32, align 4
5493 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5494 // CHECK8-NEXT:    [[J:%.*]] = alloca i32, align 4
5495 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5496 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5497 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5498 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5499 // CHECK8-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
5500 // CHECK8-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
5501 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
5502 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5503 // CHECK8-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
5504 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5505 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5506 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
5507 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
5508 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5509 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5510 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5511 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
5512 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5513 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5514 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5515 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5516 // CHECK8:       omp.inner.for.cond:
5517 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5518 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5519 // CHECK8-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
5520 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5521 // CHECK8:       omp.inner.for.body:
5522 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5523 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
5524 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
5525 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5526 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
5527 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5528 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5529 // CHECK8-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
5530 // CHECK8-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
5531 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
5532 // CHECK8-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
5533 // CHECK8-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
5534 // CHECK8-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
5535 // CHECK8-NEXT:    store i32 10, i32* [[K]], align 4
5536 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
5537 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
5538 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
5539 // CHECK8-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
5540 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
5541 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
5542 // CHECK8-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
5543 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
5544 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
5545 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
5546 // CHECK8-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
5547 // CHECK8-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
5548 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5549 // CHECK8:       omp.body.continue:
5550 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5551 // CHECK8:       omp.inner.for.inc:
5552 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5553 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5554 // CHECK8-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
5555 // CHECK8-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
5556 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5557 // CHECK8:       omp.inner.for.end:
5558 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5559 // CHECK8:       omp.loop.exit:
5560 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
5561 // CHECK8-NEXT:    ret void
5562 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
5563 // CHECK8-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
5564 // CHECK8-NEXT:  entry:
5565 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5566 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
5567 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
5568 // CHECK8-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
5569 // CHECK8-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
5570 // CHECK8-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
5571 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5572 // CHECK8-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
5573 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
5574 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5575 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
5576 // CHECK8-NEXT:    br label [[DOTEXECUTE:%.*]]
5577 // CHECK8:       .execute:
5578 // CHECK8-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
5579 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
5580 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
5581 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
5582 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
5583 // CHECK8-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
5584 // CHECK8-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
5585 // CHECK8:       .omp.deinit:
5586 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
5587 // CHECK8-NEXT:    br label [[DOTEXIT:%.*]]
5588 // CHECK8:       .exit:
5589 // CHECK8-NEXT:    ret void
5590 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__8
5591 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
5592 // CHECK8-NEXT:  entry:
5593 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5594 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5595 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5596 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
5597 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
5598 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5599 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5600 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5601 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5602 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
5603 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5604 // CHECK8-NEXT:    [[J:%.*]] = alloca i32, align 4
5605 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
5606 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
5607 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
5608 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5609 // CHECK8-NEXT:    [[I9:%.*]] = alloca i32, align 4
5610 // CHECK8-NEXT:    [[J10:%.*]] = alloca i32, align 4
5611 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
5612 // CHECK8-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
5613 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5614 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5615 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5616 // CHECK8-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
5617 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
5618 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
5619 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
5620 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
5621 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5622 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5623 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
5624 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5625 // CHECK8-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
5626 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5627 // CHECK8-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
5628 // CHECK8-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
5629 // CHECK8-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
5630 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
5631 // CHECK8-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
5632 // CHECK8-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
5633 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
5634 // CHECK8-NEXT:    store i32 0, i32* [[J]], align 4
5635 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5636 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5637 // CHECK8-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
5638 // CHECK8:       land.lhs.true:
5639 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5640 // CHECK8-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
5641 // CHECK8-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
5642 // CHECK8:       omp.precond.then:
5643 // CHECK8-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
5644 // CHECK8-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
5645 // CHECK8-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
5646 // CHECK8-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
5647 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5648 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5649 // CHECK8-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
5650 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5651 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5652 // CHECK8-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
5653 // CHECK8-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
5654 // CHECK8-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
5655 // CHECK8-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
5656 // CHECK8-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5657 // CHECK8:       cond.true:
5658 // CHECK8-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
5659 // CHECK8-NEXT:    br label [[COND_END:%.*]]
5660 // CHECK8:       cond.false:
5661 // CHECK8-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
5662 // CHECK8-NEXT:    br label [[COND_END]]
5663 // CHECK8:       cond.end:
5664 // CHECK8-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5665 // CHECK8-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
5666 // CHECK8-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
5667 // CHECK8-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
5668 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5669 // CHECK8:       omp.inner.for.cond:
5670 // CHECK8-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
5671 // CHECK8-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
5672 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
5673 // CHECK8-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
5674 // CHECK8-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5675 // CHECK8:       omp.inner.for.body:
5676 // CHECK8-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
5677 // CHECK8-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
5678 // CHECK8-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
5679 // CHECK8-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
5680 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
5681 // CHECK8-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
5682 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
5683 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
5684 // CHECK8-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
5685 // CHECK8-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
5686 // CHECK8-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
5687 // CHECK8-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
5688 // CHECK8-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
5689 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
5690 // CHECK8-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
5691 // CHECK8-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
5692 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
5693 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
5694 // CHECK8-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
5695 // CHECK8-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5696 // CHECK8-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
5697 // CHECK8-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
5698 // CHECK8-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
5699 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5700 // CHECK8:       omp.inner.for.inc:
5701 // CHECK8-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
5702 // CHECK8-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
5703 // CHECK8-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
5704 // CHECK8-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
5705 // CHECK8-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
5706 // CHECK8-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
5707 // CHECK8-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
5708 // CHECK8-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
5709 // CHECK8-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
5710 // CHECK8-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
5711 // CHECK8-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
5712 // CHECK8-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
5713 // CHECK8-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
5714 // CHECK8-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
5715 // CHECK8-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
5716 // CHECK8-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
5717 // CHECK8:       cond.true18:
5718 // CHECK8-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
5719 // CHECK8-NEXT:    br label [[COND_END20:%.*]]
5720 // CHECK8:       cond.false19:
5721 // CHECK8-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
5722 // CHECK8-NEXT:    br label [[COND_END20]]
5723 // CHECK8:       cond.end20:
5724 // CHECK8-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
5725 // CHECK8-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
5726 // CHECK8-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
5727 // CHECK8-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
5728 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5729 // CHECK8:       omp.inner.for.end:
5730 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5731 // CHECK8:       omp.loop.exit:
5732 // CHECK8-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5733 // CHECK8-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
5734 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
5735 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
5736 // CHECK8:       omp.precond.end:
5737 // CHECK8-NEXT:    ret void
5738 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__9
5739 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
5740 // CHECK8-NEXT:  entry:
5741 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5742 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5743 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5744 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5745 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5746 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
5747 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
5748 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5749 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5750 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5751 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5752 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
5753 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5754 // CHECK8-NEXT:    [[J:%.*]] = alloca i32, align 4
5755 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
5756 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
5757 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
5758 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5759 // CHECK8-NEXT:    [[I11:%.*]] = alloca i32, align 4
5760 // CHECK8-NEXT:    [[J12:%.*]] = alloca i32, align 4
5761 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5762 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5763 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5764 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5765 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5766 // CHECK8-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
5767 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
5768 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
5769 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
5770 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
5771 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5772 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5773 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
5774 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5775 // CHECK8-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
5776 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5777 // CHECK8-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
5778 // CHECK8-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
5779 // CHECK8-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
5780 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
5781 // CHECK8-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
5782 // CHECK8-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
5783 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
5784 // CHECK8-NEXT:    store i32 0, i32* [[J]], align 4
5785 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5786 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5787 // CHECK8-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
5788 // CHECK8:       land.lhs.true:
5789 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5790 // CHECK8-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
5791 // CHECK8-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
5792 // CHECK8:       omp.precond.then:
5793 // CHECK8-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
5794 // CHECK8-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
5795 // CHECK8-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
5796 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5797 // CHECK8-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
5798 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5799 // CHECK8-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
5800 // CHECK8-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
5801 // CHECK8-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
5802 // CHECK8-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
5803 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5804 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5805 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5806 // CHECK8-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
5807 // CHECK8-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
5808 // CHECK8-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
5809 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5810 // CHECK8:       omp.inner.for.cond:
5811 // CHECK8-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
5812 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5813 // CHECK8-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
5814 // CHECK8-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
5815 // CHECK8-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5816 // CHECK8:       omp.inner.for.body:
5817 // CHECK8-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
5818 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5819 // CHECK8-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
5820 // CHECK8-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
5821 // CHECK8-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
5822 // CHECK8-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
5823 // CHECK8-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
5824 // CHECK8-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
5825 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
5826 // CHECK8-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
5827 // CHECK8-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
5828 // CHECK8-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
5829 // CHECK8-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
5830 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5831 // CHECK8-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
5832 // CHECK8-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
5833 // CHECK8-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
5834 // CHECK8-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
5835 // CHECK8-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
5836 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5837 // CHECK8-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
5838 // CHECK8-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
5839 // CHECK8-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
5840 // CHECK8-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
5841 // CHECK8-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
5842 // CHECK8-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
5843 // CHECK8-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
5844 // CHECK8-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
5845 // CHECK8-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
5846 // CHECK8-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
5847 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
5848 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
5849 // CHECK8-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
5850 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
5851 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
5852 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
5853 // CHECK8-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
5854 // CHECK8-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
5855 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5856 // CHECK8:       omp.body.continue:
5857 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5858 // CHECK8:       omp.inner.for.inc:
5859 // CHECK8-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
5860 // CHECK8-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
5861 // CHECK8-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
5862 // CHECK8-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
5863 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
5864 // CHECK8:       omp.inner.for.end:
5865 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5866 // CHECK8:       omp.loop.exit:
5867 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5868 // CHECK8-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
5869 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
5870 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
5871 // CHECK8:       omp.precond.end:
5872 // CHECK8-NEXT:    ret void
5873 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
5874 // CHECK8-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
5875 // CHECK8-NEXT:  entry:
5876 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5877 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
5878 // CHECK8-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
5879 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
5880 // CHECK8-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
5881 // CHECK8-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
5882 // CHECK8-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
5883 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5884 // CHECK8-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
5885 // CHECK8-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
5886 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
5887 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5888 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
5889 // CHECK8-NEXT:    br label [[DOTEXECUTE:%.*]]
5890 // CHECK8:       .execute:
5891 // CHECK8-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
5892 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
5893 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
5894 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
5895 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
5896 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
5897 // CHECK8-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
5898 // CHECK8-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
5899 // CHECK8:       .omp.deinit:
5900 // CHECK8-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
5901 // CHECK8-NEXT:    br label [[DOTEXIT:%.*]]
5902 // CHECK8:       .exit:
5903 // CHECK8-NEXT:    ret void
5904 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__10
5905 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
5906 // CHECK8-NEXT:  entry:
5907 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5908 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5909 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5910 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
5911 // CHECK8-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
5912 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5913 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5914 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5915 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5916 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5917 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5918 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5919 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5920 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5921 // CHECK8-NEXT:    [[I3:%.*]] = alloca i32, align 4
5922 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
5923 // CHECK8-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
5924 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5925 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5926 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5927 // CHECK8-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
5928 // CHECK8-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
5929 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
5930 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
5931 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
5932 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5933 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
5934 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5935 // CHECK8-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5936 // CHECK8-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5937 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
5938 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5939 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
5940 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5941 // CHECK8:       omp.precond.then:
5942 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5943 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5944 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
5945 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5946 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5947 // CHECK8-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
5948 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5949 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
5950 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
5951 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5952 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5953 // CHECK8-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
5954 // CHECK8-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5955 // CHECK8:       cond.true:
5956 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5957 // CHECK8-NEXT:    br label [[COND_END:%.*]]
5958 // CHECK8:       cond.false:
5959 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5960 // CHECK8-NEXT:    br label [[COND_END]]
5961 // CHECK8:       cond.end:
5962 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
5963 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5964 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5965 // CHECK8-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
5966 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5967 // CHECK8:       omp.inner.for.cond:
5968 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5969 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5970 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
5971 // CHECK8-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
5972 // CHECK8-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5973 // CHECK8:       omp.inner.for.body:
5974 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5975 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5976 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
5977 // CHECK8-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
5978 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
5979 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
5980 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
5981 // CHECK8-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
5982 // CHECK8-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
5983 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
5984 // CHECK8-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
5985 // CHECK8-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
5986 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
5987 // CHECK8-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
5988 // CHECK8-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
5989 // CHECK8-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
5990 // CHECK8-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
5991 // CHECK8-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
5992 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
5993 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
5994 // CHECK8-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
5995 // CHECK8-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5996 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
5997 // CHECK8-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
5998 // CHECK8-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
5999 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6000 // CHECK8:       omp.inner.for.inc:
6001 // CHECK8-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6002 // CHECK8-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6003 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
6004 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
6005 // CHECK8-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6006 // CHECK8-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6007 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
6008 // CHECK8-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
6009 // CHECK8-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6010 // CHECK8-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6011 // CHECK8-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
6012 // CHECK8-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
6013 // CHECK8-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6014 // CHECK8-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6015 // CHECK8-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
6016 // CHECK8-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
6017 // CHECK8:       cond.true10:
6018 // CHECK8-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6019 // CHECK8-NEXT:    br label [[COND_END12:%.*]]
6020 // CHECK8:       cond.false11:
6021 // CHECK8-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6022 // CHECK8-NEXT:    br label [[COND_END12]]
6023 // CHECK8:       cond.end12:
6024 // CHECK8-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
6025 // CHECK8-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
6026 // CHECK8-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6027 // CHECK8-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
6028 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
6029 // CHECK8:       omp.inner.for.end:
6030 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6031 // CHECK8:       omp.loop.exit:
6032 // CHECK8-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6033 // CHECK8-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
6034 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
6035 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
6036 // CHECK8:       omp.precond.end:
6037 // CHECK8-NEXT:    ret void
6038 // CHECK8-LABEL: define {{[^@]+}}@__omp_outlined__11
6039 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
6040 // CHECK8-NEXT:  entry:
6041 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6042 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6043 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6044 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6045 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6046 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
6047 // CHECK8-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
6048 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6049 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6050 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6051 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6052 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
6053 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6054 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6055 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6056 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6057 // CHECK8-NEXT:    [[I3:%.*]] = alloca i32, align 4
6058 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6059 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6060 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6061 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6062 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6063 // CHECK8-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
6064 // CHECK8-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
6065 // CHECK8-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
6066 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
6067 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
6068 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6069 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
6070 // CHECK8-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6071 // CHECK8-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6072 // CHECK8-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6073 // CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
6074 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6075 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
6076 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6077 // CHECK8:       omp.precond.then:
6078 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6079 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6080 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
6081 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6082 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6083 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
6084 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
6085 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6086 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6087 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6088 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
6089 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6090 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6091 // CHECK8-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
6092 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6093 // CHECK8:       omp.inner.for.cond:
6094 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6095 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6096 // CHECK8-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
6097 // CHECK8-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6098 // CHECK8:       omp.inner.for.body:
6099 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6100 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
6101 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6102 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
6103 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
6104 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
6105 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
6106 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
6107 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
6108 // CHECK8-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
6109 // CHECK8-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
6110 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6111 // CHECK8:       omp.body.continue:
6112 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6113 // CHECK8:       omp.inner.for.inc:
6114 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6115 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6116 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
6117 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
6118 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]]
6119 // CHECK8:       omp.inner.for.end:
6120 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6121 // CHECK8:       omp.loop.exit:
6122 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6123 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
6124 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
6125 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
6126 // CHECK8:       omp.precond.end:
6127 // CHECK8-NEXT:    ret void
6128 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
6129 // CHECK9-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
6130 // CHECK9-NEXT:  entry:
6131 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6132 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
6133 // CHECK9-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
6134 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
6135 // CHECK9-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
6136 // CHECK9-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
6137 // CHECK9-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
6138 // CHECK9-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
6139 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6140 // CHECK9-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
6141 // CHECK9-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
6142 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6143 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
6144 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
6145 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
6146 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
6147 // CHECK9-NEXT:    br label [[DOTEXECUTE:%.*]]
6148 // CHECK9:       .execute:
6149 // CHECK9-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
6150 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
6151 // CHECK9-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
6152 // CHECK9-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
6153 // CHECK9-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
6154 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
6155 // CHECK9-NEXT:    [[CONV3:%.*]] = bitcast i64* [[L_CASTED]] to i32*
6156 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
6157 // CHECK9-NEXT:    [[TMP5:%.*]] = load i64, i64* [[L_CASTED]], align 8
6158 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
6159 // CHECK9-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i64 [[TMP5]]) #[[ATTR3:[0-9]+]]
6160 // CHECK9-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
6161 // CHECK9:       .omp.deinit:
6162 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
6163 // CHECK9-NEXT:    br label [[DOTEXIT:%.*]]
6164 // CHECK9:       .exit:
6165 // CHECK9-NEXT:    ret void
6166 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__
6167 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
6168 // CHECK9-NEXT:  entry:
6169 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6170 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6171 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6172 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
6173 // CHECK9-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
6174 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6175 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6176 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6177 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
6178 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6179 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6180 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6181 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6182 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6183 // CHECK9-NEXT:    [[I5:%.*]] = alloca i32, align 4
6184 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
6185 // CHECK9-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
6186 // CHECK9-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
6187 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6188 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6189 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6190 // CHECK9-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
6191 // CHECK9-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
6192 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6193 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
6194 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
6195 // CHECK9-NEXT:    [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
6196 // CHECK9-NEXT:    [[TMP2:%.*]] = load i64, i64* @"_openmp_static_kernel$size", align 8
6197 // CHECK9-NEXT:    call void @__kmpc_get_team_static_memory(i16 1, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i64 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
6198 // CHECK9-NEXT:    [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 8
6199 // CHECK9-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i64 0
6200 // CHECK9-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
6201 // CHECK9-NEXT:    [[L2:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
6202 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV]], align 8
6203 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
6204 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6205 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
6206 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6207 // CHECK9-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
6208 // CHECK9-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
6209 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6210 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6211 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
6212 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6213 // CHECK9:       omp.precond.then:
6214 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6215 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6216 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
6217 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6218 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6219 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6220 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6221 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
6222 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6223 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6224 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6225 // CHECK9-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6226 // CHECK9:       cond.true:
6227 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6228 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6229 // CHECK9:       cond.false:
6230 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6231 // CHECK9-NEXT:    br label [[COND_END]]
6232 // CHECK9:       cond.end:
6233 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6234 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6235 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6236 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6237 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6238 // CHECK9:       omp.inner.for.cond:
6239 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6240 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6241 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
6242 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
6243 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6244 // CHECK9:       omp.inner.for.body:
6245 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6246 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6247 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6248 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
6249 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[CONV]], align 8
6250 // CHECK9-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
6251 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV8]], align 4
6252 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[N_CASTED]], align 8
6253 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV1]], align 8
6254 // CHECK9-NEXT:    [[CONV9:%.*]] = bitcast i64* [[L_CASTED]] to i32*
6255 // CHECK9-NEXT:    store i32 [[TMP25]], i32* [[CONV9]], align 4
6256 // CHECK9-NEXT:    [[TMP26:%.*]] = load i64, i64* [[L_CASTED]], align 8
6257 // CHECK9-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
6258 // CHECK9-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP20]] to i8*
6259 // CHECK9-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
6260 // CHECK9-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
6261 // CHECK9-NEXT:    [[TMP30:%.*]] = inttoptr i64 [[TMP22]] to i8*
6262 // CHECK9-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
6263 // CHECK9-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
6264 // CHECK9-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP24]] to i8*
6265 // CHECK9-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 8
6266 // CHECK9-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
6267 // CHECK9-NEXT:    [[TMP34:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
6268 // CHECK9-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 8
6269 // CHECK9-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
6270 // CHECK9-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP26]] to i8*
6271 // CHECK9-NEXT:    store i8* [[TMP36]], i8** [[TMP35]], align 8
6272 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6273 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP37]], align 4
6274 // CHECK9-NEXT:    [[TMP39:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
6275 // CHECK9-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP38]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i64)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP39]], i64 5)
6276 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6277 // CHECK9:       omp.inner.for.inc:
6278 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6279 // CHECK9-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6280 // CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
6281 // CHECK9-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
6282 // CHECK9-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6283 // CHECK9-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6284 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
6285 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
6286 // CHECK9-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6287 // CHECK9-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6288 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP44]], [[TMP45]]
6289 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
6290 // CHECK9-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6291 // CHECK9-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6292 // CHECK9-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP46]], [[TMP47]]
6293 // CHECK9-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
6294 // CHECK9:       cond.true14:
6295 // CHECK9-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6296 // CHECK9-NEXT:    br label [[COND_END16:%.*]]
6297 // CHECK9:       cond.false15:
6298 // CHECK9-NEXT:    [[TMP49:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6299 // CHECK9-NEXT:    br label [[COND_END16]]
6300 // CHECK9:       cond.end16:
6301 // CHECK9-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP48]], [[COND_TRUE14]] ], [ [[TMP49]], [[COND_FALSE15]] ]
6302 // CHECK9-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
6303 // CHECK9-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6304 // CHECK9-NEXT:    store i32 [[TMP50]], i32* [[DOTOMP_IV]], align 4
6305 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
6306 // CHECK9:       omp.inner.for.end:
6307 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6308 // CHECK9:       omp.loop.exit:
6309 // CHECK9-NEXT:    [[TMP51:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6310 // CHECK9-NEXT:    [[TMP52:%.*]] = load i32, i32* [[TMP51]], align 4
6311 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP52]])
6312 // CHECK9-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6313 // CHECK9-NEXT:    [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
6314 // CHECK9-NEXT:    br i1 [[TMP54]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
6315 // CHECK9:       .omp.lastprivate.then:
6316 // CHECK9-NEXT:    [[TMP55:%.*]] = load i32, i32* [[CONV1]], align 8
6317 // CHECK9-NEXT:    store i32 [[TMP55]], i32* [[CONV1]], align 8
6318 // CHECK9-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
6319 // CHECK9:       .omp.lastprivate.done:
6320 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6321 // CHECK9:       omp.precond.end:
6322 // CHECK9-NEXT:    [[TMP56:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
6323 // CHECK9-NEXT:    call void @__kmpc_restore_team_static_memory(i16 1, i16 [[TMP56]])
6324 // CHECK9-NEXT:    ret void
6325 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__1
6326 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
6327 // CHECK9-NEXT:  entry:
6328 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6329 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6330 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6331 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6332 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6333 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
6334 // CHECK9-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
6335 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6336 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6337 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6338 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6339 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6340 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6341 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6342 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6343 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6344 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
6345 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6346 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6347 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6348 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6349 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6350 // CHECK9-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
6351 // CHECK9-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
6352 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6353 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
6354 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
6355 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
6356 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
6357 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6358 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
6359 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6360 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6361 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6362 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6363 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6364 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
6365 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6366 // CHECK9:       omp.precond.then:
6367 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6368 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6369 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
6370 // CHECK9-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6371 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
6372 // CHECK9-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6373 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
6374 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
6375 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
6376 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6377 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6378 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6379 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
6380 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
6381 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6382 // CHECK9:       omp.dispatch.cond:
6383 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6384 // CHECK9-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP9]] to i64
6385 // CHECK9-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6386 // CHECK9-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP10]]
6387 // CHECK9-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6388 // CHECK9:       cond.true:
6389 // CHECK9-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6390 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6391 // CHECK9:       cond.false:
6392 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6393 // CHECK9-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP12]] to i64
6394 // CHECK9-NEXT:    br label [[COND_END]]
6395 // CHECK9:       cond.end:
6396 // CHECK9-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP11]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
6397 // CHECK9-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
6398 // CHECK9-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
6399 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6400 // CHECK9-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
6401 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6402 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6403 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
6404 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6405 // CHECK9:       omp.dispatch.body:
6406 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6407 // CHECK9:       omp.inner.for.cond:
6408 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6409 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6410 // CHECK9-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
6411 // CHECK9-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6412 // CHECK9:       omp.inner.for.body:
6413 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6414 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
6415 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6416 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4
6417 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I6]], align 4
6418 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
6419 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
6420 // CHECK9-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
6421 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I6]], align 4
6422 // CHECK9-NEXT:    store i32 [[TMP20]], i32* [[CONV1]], align 8
6423 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6424 // CHECK9:       omp.body.continue:
6425 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6426 // CHECK9:       omp.inner.for.inc:
6427 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6428 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP21]], 1
6429 // CHECK9-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
6430 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
6431 // CHECK9:       omp.inner.for.end:
6432 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6433 // CHECK9:       omp.dispatch.inc:
6434 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6435 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6436 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
6437 // CHECK9-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_LB]], align 4
6438 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6439 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6440 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
6441 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_UB]], align 4
6442 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
6443 // CHECK9:       omp.dispatch.end:
6444 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6445 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
6446 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
6447 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6448 // CHECK9-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
6449 // CHECK9-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
6450 // CHECK9:       .omp.lastprivate.then:
6451 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[CONV1]], align 8
6452 // CHECK9-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
6453 // CHECK9-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
6454 // CHECK9:       .omp.lastprivate.done:
6455 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6456 // CHECK9:       omp.precond.end:
6457 // CHECK9-NEXT:    ret void
6458 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
6459 // CHECK9-SAME: (i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
6460 // CHECK9-NEXT:  entry:
6461 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6462 // CHECK9-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
6463 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
6464 // CHECK9-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
6465 // CHECK9-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
6466 // CHECK9-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
6467 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6468 // CHECK9-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
6469 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6470 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
6471 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
6472 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
6473 // CHECK9-NEXT:    br label [[DOTEXECUTE:%.*]]
6474 // CHECK9:       .execute:
6475 // CHECK9-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
6476 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
6477 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
6478 // CHECK9-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
6479 // CHECK9-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
6480 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
6481 // CHECK9-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
6482 // CHECK9-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
6483 // CHECK9:       .omp.deinit:
6484 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
6485 // CHECK9-NEXT:    br label [[DOTEXIT:%.*]]
6486 // CHECK9:       .exit:
6487 // CHECK9-NEXT:    ret void
6488 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__2
6489 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
6490 // CHECK9-NEXT:  entry:
6491 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6492 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6493 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6494 // CHECK9-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
6495 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6496 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6497 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6498 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6499 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6500 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6501 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6502 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6503 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6504 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
6505 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
6506 // CHECK9-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
6507 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6508 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6509 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6510 // CHECK9-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
6511 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6512 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
6513 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
6514 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
6515 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6516 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
6517 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6518 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6519 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6520 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6521 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6522 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
6523 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6524 // CHECK9:       omp.precond.then:
6525 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6526 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6527 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
6528 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6529 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6530 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
6531 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6532 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
6533 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
6534 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6535 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6536 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
6537 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6538 // CHECK9:       cond.true:
6539 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6540 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6541 // CHECK9:       cond.false:
6542 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6543 // CHECK9-NEXT:    br label [[COND_END]]
6544 // CHECK9:       cond.end:
6545 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
6546 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6547 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6548 // CHECK9-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
6549 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6550 // CHECK9:       omp.inner.for.cond:
6551 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6552 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6553 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
6554 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
6555 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6556 // CHECK9:       omp.inner.for.body:
6557 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6558 // CHECK9-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
6559 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6560 // CHECK9-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
6561 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
6562 // CHECK9-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
6563 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
6564 // CHECK9-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
6565 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
6566 // CHECK9-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to i8*
6567 // CHECK9-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 8
6568 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
6569 // CHECK9-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to i8*
6570 // CHECK9-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
6571 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
6572 // CHECK9-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to i8*
6573 // CHECK9-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
6574 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
6575 // CHECK9-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
6576 // CHECK9-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
6577 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6578 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
6579 // CHECK9-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
6580 // CHECK9-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP30]], i64 4)
6581 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6582 // CHECK9:       omp.inner.for.inc:
6583 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6584 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6585 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
6586 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
6587 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6588 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6589 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
6590 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
6591 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6592 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6593 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
6594 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
6595 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6596 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6597 // CHECK9-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
6598 // CHECK9-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
6599 // CHECK9:       cond.true11:
6600 // CHECK9-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6601 // CHECK9-NEXT:    br label [[COND_END13:%.*]]
6602 // CHECK9:       cond.false12:
6603 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6604 // CHECK9-NEXT:    br label [[COND_END13]]
6605 // CHECK9:       cond.end13:
6606 // CHECK9-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE11]] ], [ [[TMP40]], [[COND_FALSE12]] ]
6607 // CHECK9-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
6608 // CHECK9-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6609 // CHECK9-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV]], align 4
6610 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
6611 // CHECK9:       omp.inner.for.end:
6612 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6613 // CHECK9:       omp.loop.exit:
6614 // CHECK9-NEXT:    [[TMP42:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6615 // CHECK9-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP42]], align 4
6616 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP43]])
6617 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6618 // CHECK9:       omp.precond.end:
6619 // CHECK9-NEXT:    ret void
6620 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__3
6621 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
6622 // CHECK9-NEXT:  entry:
6623 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6624 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6625 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6626 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6627 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6628 // CHECK9-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
6629 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6630 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6631 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6632 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6633 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6634 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6635 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6636 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6637 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6638 // CHECK9-NEXT:    [[I5:%.*]] = alloca i32, align 4
6639 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6640 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6641 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6642 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6643 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6644 // CHECK9-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
6645 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6646 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
6647 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
6648 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
6649 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6650 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
6651 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6652 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6653 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6654 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6655 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6656 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
6657 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6658 // CHECK9:       omp.precond.then:
6659 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6660 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6661 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
6662 // CHECK9-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6663 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
6664 // CHECK9-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6665 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
6666 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
6667 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
6668 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6669 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6670 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6671 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
6672 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6673 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6674 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
6675 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6676 // CHECK9:       omp.inner.for.cond:
6677 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6678 // CHECK9-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
6679 // CHECK9-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6680 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
6681 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6682 // CHECK9:       omp.inner.for.body:
6683 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6684 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
6685 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6686 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
6687 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I5]], align 4
6688 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
6689 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i64 0, i64 [[IDXPROM]]
6690 // CHECK9-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
6691 // CHECK9-NEXT:    [[CONV8:%.*]] = sext i16 [[TMP14]] to i32
6692 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[CONV8]], 1
6693 // CHECK9-NEXT:    [[CONV10:%.*]] = trunc i32 [[ADD9]] to i16
6694 // CHECK9-NEXT:    store i16 [[CONV10]], i16* [[ARRAYIDX]], align 2
6695 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6696 // CHECK9:       omp.body.continue:
6697 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6698 // CHECK9:       omp.inner.for.inc:
6699 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6700 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6701 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
6702 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
6703 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
6704 // CHECK9:       omp.inner.for.end:
6705 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6706 // CHECK9:       omp.loop.exit:
6707 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6708 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
6709 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
6710 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6711 // CHECK9:       omp.precond.end:
6712 // CHECK9-NEXT:    ret void
6713 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
6714 // CHECK9-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
6715 // CHECK9-NEXT:  entry:
6716 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
6717 // CHECK9-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
6718 // CHECK9-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
6719 // CHECK9-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
6720 // CHECK9-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
6721 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
6722 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
6723 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
6724 // CHECK9-NEXT:    br label [[DOTEXECUTE:%.*]]
6725 // CHECK9:       .execute:
6726 // CHECK9-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
6727 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
6728 // CHECK9-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
6729 // CHECK9-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
6730 // CHECK9:       .omp.deinit:
6731 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
6732 // CHECK9-NEXT:    br label [[DOTEXIT:%.*]]
6733 // CHECK9:       .exit:
6734 // CHECK9-NEXT:    ret void
6735 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__4
6736 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
6737 // CHECK9-NEXT:  entry:
6738 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6739 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6740 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
6741 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6742 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6743 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6744 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6745 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6746 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6747 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6748 // CHECK9-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
6749 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6750 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6751 // CHECK9-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
6752 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
6753 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6754 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
6755 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6756 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6757 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
6758 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6759 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
6760 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
6761 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6762 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
6763 // CHECK9-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6764 // CHECK9:       cond.true:
6765 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6766 // CHECK9:       cond.false:
6767 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6768 // CHECK9-NEXT:    br label [[COND_END]]
6769 // CHECK9:       cond.end:
6770 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6771 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6772 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6773 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6774 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6775 // CHECK9:       omp.inner.for.cond:
6776 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6777 // CHECK9-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
6778 // CHECK9-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6779 // CHECK9:       omp.inner.for.body:
6780 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6781 // CHECK9-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
6782 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6783 // CHECK9-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
6784 // CHECK9-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
6785 // CHECK9-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to i8*
6786 // CHECK9-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
6787 // CHECK9-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
6788 // CHECK9-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to i8*
6789 // CHECK9-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
6790 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
6791 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
6792 // CHECK9-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
6793 // CHECK9-NEXT:    [[TMP17:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
6794 // CHECK9-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP17]], i64 3)
6795 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6796 // CHECK9:       omp.inner.for.inc:
6797 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6798 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6799 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
6800 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
6801 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6802 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6803 // CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
6804 // CHECK9-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
6805 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6806 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6807 // CHECK9-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
6808 // CHECK9-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
6809 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6810 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP24]], 9
6811 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
6812 // CHECK9:       cond.true5:
6813 // CHECK9-NEXT:    br label [[COND_END7:%.*]]
6814 // CHECK9:       cond.false6:
6815 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6816 // CHECK9-NEXT:    br label [[COND_END7]]
6817 // CHECK9:       cond.end7:
6818 // CHECK9-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP25]], [[COND_FALSE6]] ]
6819 // CHECK9-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
6820 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6821 // CHECK9-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV]], align 4
6822 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
6823 // CHECK9:       omp.inner.for.end:
6824 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6825 // CHECK9:       omp.loop.exit:
6826 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
6827 // CHECK9-NEXT:    ret void
6828 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__5
6829 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
6830 // CHECK9-NEXT:  entry:
6831 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6832 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6833 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6834 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6835 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
6836 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6837 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6838 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6839 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6840 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6841 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6842 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6843 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6844 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6845 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6846 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6847 // CHECK9-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
6848 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
6849 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6850 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
6851 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6852 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
6853 // CHECK9-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6854 // CHECK9-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
6855 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
6856 // CHECK9-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
6857 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6858 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6859 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6860 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
6861 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6862 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6863 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6864 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6865 // CHECK9:       omp.inner.for.cond:
6866 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6867 // CHECK9-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
6868 // CHECK9-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6869 // CHECK9-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
6870 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6871 // CHECK9:       omp.inner.for.body:
6872 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6873 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
6874 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6875 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
6876 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
6877 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
6878 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
6879 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
6880 // CHECK9-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
6881 // CHECK9-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
6882 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6883 // CHECK9:       omp.body.continue:
6884 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6885 // CHECK9:       omp.inner.for.inc:
6886 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6887 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6888 // CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
6889 // CHECK9-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
6890 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
6891 // CHECK9:       omp.inner.for.end:
6892 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6893 // CHECK9:       omp.loop.exit:
6894 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
6895 // CHECK9-NEXT:    ret void
6896 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
6897 // CHECK9-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
6898 // CHECK9-NEXT:  entry:
6899 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
6900 // CHECK9-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
6901 // CHECK9-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
6902 // CHECK9-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
6903 // CHECK9-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
6904 // CHECK9-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
6905 // CHECK9-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
6906 // CHECK9-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
6907 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
6908 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
6909 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
6910 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
6911 // CHECK9-NEXT:    br label [[DOTEXECUTE:%.*]]
6912 // CHECK9:       .execute:
6913 // CHECK9-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
6914 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
6915 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[F_CASTED]] to i32*
6916 // CHECK9-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
6917 // CHECK9-NEXT:    [[TMP3:%.*]] = load i64, i64* [[F_CASTED]], align 8
6918 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
6919 // CHECK9-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i64 [[TMP3]]) #[[ATTR3]]
6920 // CHECK9-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
6921 // CHECK9:       .omp.deinit:
6922 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
6923 // CHECK9-NEXT:    br label [[DOTEXIT:%.*]]
6924 // CHECK9:       .exit:
6925 // CHECK9-NEXT:    ret void
6926 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__6
6927 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
6928 // CHECK9-NEXT:  entry:
6929 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6930 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6931 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
6932 // CHECK9-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
6933 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6934 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6935 // CHECK9-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
6936 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6937 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6938 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6939 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6940 // CHECK9-NEXT:    [[K:%.*]] = alloca i32, align 4
6941 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6942 // CHECK9-NEXT:    [[J:%.*]] = alloca i32, align 4
6943 // CHECK9-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
6944 // CHECK9-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
6945 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6946 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6947 // CHECK9-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
6948 // CHECK9-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
6949 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
6950 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
6951 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6952 // CHECK9-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
6953 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6954 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6955 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
6956 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6957 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
6958 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
6959 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6960 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
6961 // CHECK9-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6962 // CHECK9:       cond.true:
6963 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6964 // CHECK9:       cond.false:
6965 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6966 // CHECK9-NEXT:    br label [[COND_END]]
6967 // CHECK9:       cond.end:
6968 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6969 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6970 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6971 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6972 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6973 // CHECK9:       omp.inner.for.cond:
6974 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6975 // CHECK9-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
6976 // CHECK9-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6977 // CHECK9:       omp.inner.for.body:
6978 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6979 // CHECK9-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
6980 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6981 // CHECK9-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
6982 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[CONV]], align 8
6983 // CHECK9-NEXT:    [[CONV3:%.*]] = bitcast i64* [[F_CASTED]] to i32*
6984 // CHECK9-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
6985 // CHECK9-NEXT:    [[TMP12:%.*]] = load i64, i64* [[F_CASTED]], align 8
6986 // CHECK9-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
6987 // CHECK9-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to i8*
6988 // CHECK9-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
6989 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
6990 // CHECK9-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to i8*
6991 // CHECK9-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
6992 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
6993 // CHECK9-NEXT:    [[TMP18:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
6994 // CHECK9-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
6995 // CHECK9-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
6996 // CHECK9-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP12]] to i8*
6997 // CHECK9-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
6998 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
6999 // CHECK9-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x [10 x i32]]*, i64)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP21]], i64 4)
7000 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7001 // CHECK9:       omp.inner.for.inc:
7002 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7003 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7004 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
7005 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
7006 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7007 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7008 // CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
7009 // CHECK9-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_LB]], align 4
7010 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7011 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7012 // CHECK9-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
7013 // CHECK9-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_COMB_UB]], align 4
7014 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7015 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP28]], 99
7016 // CHECK9-NEXT:    br i1 [[CMP6]], label [[COND_TRUE7:%.*]], label [[COND_FALSE8:%.*]]
7017 // CHECK9:       cond.true7:
7018 // CHECK9-NEXT:    br label [[COND_END9:%.*]]
7019 // CHECK9:       cond.false8:
7020 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7021 // CHECK9-NEXT:    br label [[COND_END9]]
7022 // CHECK9:       cond.end9:
7023 // CHECK9-NEXT:    [[COND10:%.*]] = phi i32 [ 99, [[COND_TRUE7]] ], [ [[TMP29]], [[COND_FALSE8]] ]
7024 // CHECK9-NEXT:    store i32 [[COND10]], i32* [[DOTOMP_COMB_UB]], align 4
7025 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7026 // CHECK9-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV]], align 4
7027 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
7028 // CHECK9:       omp.inner.for.end:
7029 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7030 // CHECK9:       omp.loop.exit:
7031 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
7032 // CHECK9-NEXT:    ret void
7033 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__7
7034 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
7035 // CHECK9-NEXT:  entry:
7036 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7037 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7038 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7039 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7040 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
7041 // CHECK9-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
7042 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7043 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7044 // CHECK9-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
7045 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7046 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7047 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7048 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7049 // CHECK9-NEXT:    [[K:%.*]] = alloca i32, align 4
7050 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7051 // CHECK9-NEXT:    [[J:%.*]] = alloca i32, align 4
7052 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7053 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7054 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7055 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7056 // CHECK9-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
7057 // CHECK9-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
7058 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
7059 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
7060 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7061 // CHECK9-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
7062 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7063 // CHECK9-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32
7064 // CHECK9-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7065 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP2]] to i32
7066 // CHECK9-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
7067 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
7068 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7069 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7070 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7071 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
7072 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7073 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7074 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7075 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7076 // CHECK9:       omp.inner.for.cond:
7077 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7078 // CHECK9-NEXT:    [[CONV4:%.*]] = sext i32 [[TMP6]] to i64
7079 // CHECK9-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7080 // CHECK9-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV4]], [[TMP7]]
7081 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7082 // CHECK9:       omp.inner.for.body:
7083 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7084 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
7085 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
7086 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7087 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
7088 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7089 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7090 // CHECK9-NEXT:    [[DIV5:%.*]] = sdiv i32 [[TMP10]], 10
7091 // CHECK9-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 10
7092 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL6]]
7093 // CHECK9-NEXT:    [[MUL7:%.*]] = mul nsw i32 [[SUB]], 1
7094 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL7]]
7095 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[J]], align 4
7096 // CHECK9-NEXT:    store i32 10, i32* [[K]], align 4
7097 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
7098 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
7099 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8
7100 // CHECK9-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
7101 // CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP11]], [[MUL9]]
7102 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
7103 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD10]], [[TMP14]]
7104 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
7105 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
7106 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
7107 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
7108 // CHECK9-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP16]] to i64
7109 // CHECK9-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM12]]
7110 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[ARRAYIDX13]], align 4
7111 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7112 // CHECK9:       omp.body.continue:
7113 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7114 // CHECK9:       omp.inner.for.inc:
7115 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7116 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7117 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
7118 // CHECK9-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
7119 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
7120 // CHECK9:       omp.inner.for.end:
7121 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7122 // CHECK9:       omp.loop.exit:
7123 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
7124 // CHECK9-NEXT:    ret void
7125 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
7126 // CHECK9-SAME: (i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
7127 // CHECK9-NEXT:  entry:
7128 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7129 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
7130 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
7131 // CHECK9-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
7132 // CHECK9-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
7133 // CHECK9-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
7134 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7135 // CHECK9-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
7136 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7137 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
7138 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
7139 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
7140 // CHECK9-NEXT:    br label [[DOTEXECUTE:%.*]]
7141 // CHECK9:       .execute:
7142 // CHECK9-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
7143 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
7144 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7145 // CHECK9-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
7146 // CHECK9-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
7147 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
7148 // CHECK9-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
7149 // CHECK9-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
7150 // CHECK9:       .omp.deinit:
7151 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
7152 // CHECK9-NEXT:    br label [[DOTEXIT:%.*]]
7153 // CHECK9:       .exit:
7154 // CHECK9-NEXT:    ret void
7155 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__8
7156 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
7157 // CHECK9-NEXT:  entry:
7158 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7159 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7160 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7161 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
7162 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
7163 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7164 // CHECK9-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
7165 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7166 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7167 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
7168 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7169 // CHECK9-NEXT:    [[J:%.*]] = alloca i32, align 4
7170 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
7171 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
7172 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
7173 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7174 // CHECK9-NEXT:    [[I10:%.*]] = alloca i32, align 4
7175 // CHECK9-NEXT:    [[J11:%.*]] = alloca i32, align 4
7176 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
7177 // CHECK9-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
7178 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7179 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7180 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7181 // CHECK9-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
7182 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7183 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
7184 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
7185 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
7186 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
7187 // CHECK9-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7188 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7189 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
7190 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7191 // CHECK9-NEXT:    [[CONV4:%.*]] = sext i32 [[DIV]] to i64
7192 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7193 // CHECK9-NEXT:    [[SUB5:%.*]] = sub nsw i32 [[TMP4]], 0
7194 // CHECK9-NEXT:    [[DIV6:%.*]] = sdiv i32 [[SUB5]], 1
7195 // CHECK9-NEXT:    [[CONV7:%.*]] = sext i32 [[DIV6]] to i64
7196 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV4]], [[CONV7]]
7197 // CHECK9-NEXT:    [[SUB8:%.*]] = sub nsw i64 [[MUL]], 1
7198 // CHECK9-NEXT:    store i64 [[SUB8]], i64* [[DOTCAPTURE_EXPR_3]], align 8
7199 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7200 // CHECK9-NEXT:    store i32 0, i32* [[J]], align 4
7201 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7202 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
7203 // CHECK9-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
7204 // CHECK9:       land.lhs.true:
7205 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7206 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp slt i32 0, [[TMP6]]
7207 // CHECK9-NEXT:    br i1 [[CMP9]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
7208 // CHECK9:       omp.precond.then:
7209 // CHECK9-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
7210 // CHECK9-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
7211 // CHECK9-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
7212 // CHECK9-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
7213 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7214 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
7215 // CHECK9-NEXT:    [[CONV12:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
7216 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7217 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
7218 // CHECK9-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV12]])
7219 // CHECK9-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
7220 // CHECK9-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
7221 // CHECK9-NEXT:    [[CMP13:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
7222 // CHECK9-NEXT:    br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7223 // CHECK9:       cond.true:
7224 // CHECK9-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
7225 // CHECK9-NEXT:    br label [[COND_END:%.*]]
7226 // CHECK9:       cond.false:
7227 // CHECK9-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
7228 // CHECK9-NEXT:    br label [[COND_END]]
7229 // CHECK9:       cond.end:
7230 // CHECK9-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
7231 // CHECK9-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
7232 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
7233 // CHECK9-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
7234 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7235 // CHECK9:       omp.inner.for.cond:
7236 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7237 // CHECK9-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
7238 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
7239 // CHECK9-NEXT:    [[CMP14:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
7240 // CHECK9-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7241 // CHECK9:       omp.inner.for.body:
7242 // CHECK9-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
7243 // CHECK9-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
7244 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
7245 // CHECK9-NEXT:    [[CONV15:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7246 // CHECK9-NEXT:    store i32 [[TMP19]], i32* [[CONV15]], align 4
7247 // CHECK9-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
7248 // CHECK9-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
7249 // CHECK9-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP17]] to i8*
7250 // CHECK9-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
7251 // CHECK9-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
7252 // CHECK9-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to i8*
7253 // CHECK9-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
7254 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
7255 // CHECK9-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to i8*
7256 // CHECK9-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
7257 // CHECK9-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
7258 // CHECK9-NEXT:    [[TMP28:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
7259 // CHECK9-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
7260 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7261 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
7262 // CHECK9-NEXT:    [[TMP31:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
7263 // CHECK9-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP31]], i64 4)
7264 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7265 // CHECK9:       omp.inner.for.inc:
7266 // CHECK9-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7267 // CHECK9-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
7268 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP32]], [[TMP33]]
7269 // CHECK9-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_IV]], align 8
7270 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
7271 // CHECK9-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
7272 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
7273 // CHECK9-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_COMB_LB]], align 8
7274 // CHECK9-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
7275 // CHECK9-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
7276 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
7277 // CHECK9-NEXT:    store i64 [[ADD18]], i64* [[DOTOMP_COMB_UB]], align 8
7278 // CHECK9-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
7279 // CHECK9-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
7280 // CHECK9-NEXT:    [[CMP19:%.*]] = icmp sgt i64 [[TMP38]], [[TMP39]]
7281 // CHECK9-NEXT:    br i1 [[CMP19]], label [[COND_TRUE20:%.*]], label [[COND_FALSE21:%.*]]
7282 // CHECK9:       cond.true20:
7283 // CHECK9-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
7284 // CHECK9-NEXT:    br label [[COND_END22:%.*]]
7285 // CHECK9:       cond.false21:
7286 // CHECK9-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
7287 // CHECK9-NEXT:    br label [[COND_END22]]
7288 // CHECK9:       cond.end22:
7289 // CHECK9-NEXT:    [[COND23:%.*]] = phi i64 [ [[TMP40]], [[COND_TRUE20]] ], [ [[TMP41]], [[COND_FALSE21]] ]
7290 // CHECK9-NEXT:    store i64 [[COND23]], i64* [[DOTOMP_COMB_UB]], align 8
7291 // CHECK9-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
7292 // CHECK9-NEXT:    store i64 [[TMP42]], i64* [[DOTOMP_IV]], align 8
7293 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
7294 // CHECK9:       omp.inner.for.end:
7295 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7296 // CHECK9:       omp.loop.exit:
7297 // CHECK9-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7298 // CHECK9-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
7299 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
7300 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7301 // CHECK9:       omp.precond.end:
7302 // CHECK9-NEXT:    ret void
7303 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__9
7304 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
7305 // CHECK9-NEXT:  entry:
7306 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7307 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7308 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7309 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7310 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7311 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
7312 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
7313 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7314 // CHECK9-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
7315 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7316 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7317 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
7318 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7319 // CHECK9-NEXT:    [[J:%.*]] = alloca i32, align 4
7320 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
7321 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
7322 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
7323 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7324 // CHECK9-NEXT:    [[I10:%.*]] = alloca i32, align 4
7325 // CHECK9-NEXT:    [[J11:%.*]] = alloca i32, align 4
7326 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7327 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7328 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7329 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7330 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7331 // CHECK9-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
7332 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7333 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
7334 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
7335 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
7336 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
7337 // CHECK9-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7338 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7339 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
7340 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7341 // CHECK9-NEXT:    [[CONV4:%.*]] = sext i32 [[DIV]] to i64
7342 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7343 // CHECK9-NEXT:    [[SUB5:%.*]] = sub nsw i32 [[TMP4]], 0
7344 // CHECK9-NEXT:    [[DIV6:%.*]] = sdiv i32 [[SUB5]], 1
7345 // CHECK9-NEXT:    [[CONV7:%.*]] = sext i32 [[DIV6]] to i64
7346 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV4]], [[CONV7]]
7347 // CHECK9-NEXT:    [[SUB8:%.*]] = sub nsw i64 [[MUL]], 1
7348 // CHECK9-NEXT:    store i64 [[SUB8]], i64* [[DOTCAPTURE_EXPR_3]], align 8
7349 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7350 // CHECK9-NEXT:    store i32 0, i32* [[J]], align 4
7351 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7352 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
7353 // CHECK9-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
7354 // CHECK9:       land.lhs.true:
7355 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7356 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp slt i32 0, [[TMP6]]
7357 // CHECK9-NEXT:    br i1 [[CMP9]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
7358 // CHECK9:       omp.precond.then:
7359 // CHECK9-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
7360 // CHECK9-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
7361 // CHECK9-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
7362 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7363 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7364 // CHECK9-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_LB]], align 8
7365 // CHECK9-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
7366 // CHECK9-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
7367 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7368 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7369 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7370 // CHECK9-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
7371 // CHECK9-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
7372 // CHECK9-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
7373 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7374 // CHECK9:       omp.inner.for.cond:
7375 // CHECK9-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7376 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7377 // CHECK9-NEXT:    [[CMP12:%.*]] = icmp ule i64 [[TMP13]], [[TMP14]]
7378 // CHECK9-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7379 // CHECK9:       omp.inner.for.body:
7380 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7381 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7382 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP16]], 0
7383 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
7384 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 1, [[DIV14]]
7385 // CHECK9-NEXT:    [[CONV16:%.*]] = sext i32 [[MUL15]] to i64
7386 // CHECK9-NEXT:    [[DIV17:%.*]] = sdiv i64 [[TMP15]], [[CONV16]]
7387 // CHECK9-NEXT:    [[MUL18:%.*]] = mul nsw i64 [[DIV17]], 1
7388 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL18]]
7389 // CHECK9-NEXT:    [[CONV19:%.*]] = trunc i64 [[ADD]] to i32
7390 // CHECK9-NEXT:    store i32 [[CONV19]], i32* [[I10]], align 4
7391 // CHECK9-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7392 // CHECK9-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7393 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7394 // CHECK9-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP19]], 0
7395 // CHECK9-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
7396 // CHECK9-NEXT:    [[MUL22:%.*]] = mul nsw i32 1, [[DIV21]]
7397 // CHECK9-NEXT:    [[CONV23:%.*]] = sext i32 [[MUL22]] to i64
7398 // CHECK9-NEXT:    [[DIV24:%.*]] = sdiv i64 [[TMP18]], [[CONV23]]
7399 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7400 // CHECK9-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[TMP20]], 0
7401 // CHECK9-NEXT:    [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1
7402 // CHECK9-NEXT:    [[MUL27:%.*]] = mul nsw i32 1, [[DIV26]]
7403 // CHECK9-NEXT:    [[CONV28:%.*]] = sext i32 [[MUL27]] to i64
7404 // CHECK9-NEXT:    [[MUL29:%.*]] = mul nsw i64 [[DIV24]], [[CONV28]]
7405 // CHECK9-NEXT:    [[SUB30:%.*]] = sub nsw i64 [[TMP17]], [[MUL29]]
7406 // CHECK9-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[SUB30]], 1
7407 // CHECK9-NEXT:    [[ADD32:%.*]] = add nsw i64 0, [[MUL31]]
7408 // CHECK9-NEXT:    [[CONV33:%.*]] = trunc i64 [[ADD32]] to i32
7409 // CHECK9-NEXT:    store i32 [[CONV33]], i32* [[J11]], align 4
7410 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I10]], align 4
7411 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J11]], align 4
7412 // CHECK9-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
7413 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I10]], align 4
7414 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
7415 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
7416 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J11]], align 4
7417 // CHECK9-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP24]] to i64
7418 // CHECK9-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM35]]
7419 // CHECK9-NEXT:    store i32 [[ADD34]], i32* [[ARRAYIDX36]], align 4
7420 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7421 // CHECK9:       omp.body.continue:
7422 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7423 // CHECK9:       omp.inner.for.inc:
7424 // CHECK9-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7425 // CHECK9-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
7426 // CHECK9-NEXT:    [[ADD37:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
7427 // CHECK9-NEXT:    store i64 [[ADD37]], i64* [[DOTOMP_IV]], align 8
7428 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
7429 // CHECK9:       omp.inner.for.end:
7430 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7431 // CHECK9:       omp.loop.exit:
7432 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7433 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
7434 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
7435 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7436 // CHECK9:       omp.precond.end:
7437 // CHECK9-NEXT:    ret void
7438 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
7439 // CHECK9-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
7440 // CHECK9-NEXT:  entry:
7441 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7442 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
7443 // CHECK9-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
7444 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
7445 // CHECK9-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
7446 // CHECK9-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
7447 // CHECK9-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
7448 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7449 // CHECK9-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
7450 // CHECK9-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
7451 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7452 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
7453 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
7454 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
7455 // CHECK9-NEXT:    br label [[DOTEXECUTE:%.*]]
7456 // CHECK9:       .execute:
7457 // CHECK9-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
7458 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
7459 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7460 // CHECK9-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
7461 // CHECK9-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
7462 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 8
7463 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
7464 // CHECK9-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
7465 // CHECK9-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
7466 // CHECK9:       .omp.deinit:
7467 // CHECK9-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
7468 // CHECK9-NEXT:    br label [[DOTEXIT:%.*]]
7469 // CHECK9:       .exit:
7470 // CHECK9-NEXT:    ret void
7471 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__10
7472 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
7473 // CHECK9-NEXT:  entry:
7474 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7475 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7476 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7477 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
7478 // CHECK9-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
7479 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7480 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7481 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7482 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7483 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7484 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7485 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7486 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7487 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7488 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
7489 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
7490 // CHECK9-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
7491 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7492 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7493 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7494 // CHECK9-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
7495 // CHECK9-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
7496 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7497 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
7498 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
7499 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
7500 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7501 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
7502 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7503 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7504 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7505 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7506 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7507 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
7508 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7509 // CHECK9:       omp.precond.then:
7510 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7511 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7512 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
7513 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7514 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7515 // CHECK9-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
7516 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7517 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
7518 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
7519 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7520 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7521 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
7522 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7523 // CHECK9:       cond.true:
7524 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7525 // CHECK9-NEXT:    br label [[COND_END:%.*]]
7526 // CHECK9:       cond.false:
7527 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7528 // CHECK9-NEXT:    br label [[COND_END]]
7529 // CHECK9:       cond.end:
7530 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
7531 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7532 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7533 // CHECK9-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
7534 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7535 // CHECK9:       omp.inner.for.cond:
7536 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7537 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7538 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
7539 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
7540 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7541 // CHECK9:       omp.inner.for.body:
7542 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7543 // CHECK9-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
7544 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7545 // CHECK9-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
7546 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
7547 // CHECK9-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7548 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
7549 // CHECK9-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
7550 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[V_ADDR]], align 8
7551 // CHECK9-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
7552 // CHECK9-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to i8*
7553 // CHECK9-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
7554 // CHECK9-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
7555 // CHECK9-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to i8*
7556 // CHECK9-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
7557 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
7558 // CHECK9-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to i8*
7559 // CHECK9-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
7560 // CHECK9-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
7561 // CHECK9-NEXT:    [[TMP28:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
7562 // CHECK9-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
7563 // CHECK9-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
7564 // CHECK9-NEXT:    [[TMP30:%.*]] = bitcast i32* [[TMP20]] to i8*
7565 // CHECK9-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
7566 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7567 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
7568 // CHECK9-NEXT:    [[TMP33:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
7569 // CHECK9-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP33]], i64 5)
7570 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7571 // CHECK9:       omp.inner.for.inc:
7572 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7573 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7574 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
7575 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
7576 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7577 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7578 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
7579 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
7580 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7581 // CHECK9-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7582 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
7583 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
7584 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7585 // CHECK9-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7586 // CHECK9-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
7587 // CHECK9-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
7588 // CHECK9:       cond.true11:
7589 // CHECK9-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7590 // CHECK9-NEXT:    br label [[COND_END13:%.*]]
7591 // CHECK9:       cond.false12:
7592 // CHECK9-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7593 // CHECK9-NEXT:    br label [[COND_END13]]
7594 // CHECK9:       cond.end13:
7595 // CHECK9-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE11]] ], [ [[TMP43]], [[COND_FALSE12]] ]
7596 // CHECK9-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
7597 // CHECK9-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7598 // CHECK9-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
7599 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
7600 // CHECK9:       omp.inner.for.end:
7601 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7602 // CHECK9:       omp.loop.exit:
7603 // CHECK9-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7604 // CHECK9-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
7605 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
7606 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7607 // CHECK9:       omp.precond.end:
7608 // CHECK9-NEXT:    ret void
7609 // CHECK9-LABEL: define {{[^@]+}}@__omp_outlined__11
7610 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
7611 // CHECK9-NEXT:  entry:
7612 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7613 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7614 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7615 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7616 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7617 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
7618 // CHECK9-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
7619 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7620 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7621 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7622 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7623 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7624 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7625 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7626 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7627 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7628 // CHECK9-NEXT:    [[I5:%.*]] = alloca i32, align 4
7629 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7630 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7631 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7632 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7633 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7634 // CHECK9-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
7635 // CHECK9-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
7636 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7637 // CHECK9-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
7638 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
7639 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
7640 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7641 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
7642 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7643 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7644 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7645 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7646 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7647 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
7648 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7649 // CHECK9:       omp.precond.then:
7650 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7651 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7652 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
7653 // CHECK9-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7654 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
7655 // CHECK9-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7656 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
7657 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
7658 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
7659 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7660 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7661 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7662 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
7663 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7664 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7665 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
7666 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7667 // CHECK9:       omp.inner.for.cond:
7668 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7669 // CHECK9-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
7670 // CHECK9-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7671 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
7672 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7673 // CHECK9:       omp.inner.for.body:
7674 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7675 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
7676 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7677 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
7678 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 8
7679 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I5]], align 4
7680 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
7681 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 [[IDXPROM]]
7682 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
7683 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I5]], align 4
7684 // CHECK9-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP16]] to i64
7685 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM8]]
7686 // CHECK9-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX9]], align 4
7687 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7688 // CHECK9:       omp.body.continue:
7689 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7690 // CHECK9:       omp.inner.for.inc:
7691 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7692 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7693 // CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
7694 // CHECK9-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
7695 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
7696 // CHECK9:       omp.inner.for.end:
7697 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7698 // CHECK9:       omp.loop.exit:
7699 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7700 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
7701 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
7702 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7703 // CHECK9:       omp.precond.end:
7704 // CHECK9-NEXT:    ret void
7705 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
7706 // CHECK10-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
7707 // CHECK10-NEXT:  entry:
7708 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7709 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
7710 // CHECK10-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
7711 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
7712 // CHECK10-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
7713 // CHECK10-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
7714 // CHECK10-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
7715 // CHECK10-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
7716 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7717 // CHECK10-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
7718 // CHECK10-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
7719 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7720 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
7721 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
7722 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
7723 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
7724 // CHECK10-NEXT:    br label [[DOTEXECUTE:%.*]]
7725 // CHECK10:       .execute:
7726 // CHECK10-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
7727 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
7728 // CHECK10-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7729 // CHECK10-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
7730 // CHECK10-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
7731 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
7732 // CHECK10-NEXT:    [[CONV3:%.*]] = bitcast i64* [[L_CASTED]] to i32*
7733 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
7734 // CHECK10-NEXT:    [[TMP5:%.*]] = load i64, i64* [[L_CASTED]], align 8
7735 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
7736 // CHECK10-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i64 [[TMP5]]) #[[ATTR3:[0-9]+]]
7737 // CHECK10-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
7738 // CHECK10:       .omp.deinit:
7739 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
7740 // CHECK10-NEXT:    br label [[DOTEXIT:%.*]]
7741 // CHECK10:       .exit:
7742 // CHECK10-NEXT:    ret void
7743 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__
7744 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
7745 // CHECK10-NEXT:  entry:
7746 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7747 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7748 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7749 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
7750 // CHECK10-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
7751 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7752 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7753 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7754 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
7755 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
7756 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7757 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7758 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7759 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7760 // CHECK10-NEXT:    [[I5:%.*]] = alloca i32, align 4
7761 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
7762 // CHECK10-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
7763 // CHECK10-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
7764 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7765 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7766 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7767 // CHECK10-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
7768 // CHECK10-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
7769 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7770 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
7771 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
7772 // CHECK10-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i64 4, i16 1)
7773 // CHECK10-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
7774 // CHECK10-NEXT:    [[L2:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
7775 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
7776 // CHECK10-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
7777 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7778 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
7779 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7780 // CHECK10-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
7781 // CHECK10-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
7782 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
7783 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7784 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
7785 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7786 // CHECK10:       omp.precond.then:
7787 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7788 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
7789 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_COMB_UB]], align 4
7790 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7791 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7792 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7793 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
7794 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
7795 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7796 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
7797 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
7798 // CHECK10-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7799 // CHECK10:       cond.true:
7800 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
7801 // CHECK10-NEXT:    br label [[COND_END:%.*]]
7802 // CHECK10:       cond.false:
7803 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7804 // CHECK10-NEXT:    br label [[COND_END]]
7805 // CHECK10:       cond.end:
7806 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
7807 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7808 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7809 // CHECK10-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
7810 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7811 // CHECK10:       omp.inner.for.cond:
7812 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7813 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
7814 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
7815 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
7816 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7817 // CHECK10:       omp.inner.for.body:
7818 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7819 // CHECK10-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
7820 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7821 // CHECK10-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
7822 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8
7823 // CHECK10-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7824 // CHECK10-NEXT:    store i32 [[TMP20]], i32* [[CONV8]], align 4
7825 // CHECK10-NEXT:    [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8
7826 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8
7827 // CHECK10-NEXT:    [[CONV9:%.*]] = bitcast i64* [[L_CASTED]] to i32*
7828 // CHECK10-NEXT:    store i32 [[TMP22]], i32* [[CONV9]], align 4
7829 // CHECK10-NEXT:    [[TMP23:%.*]] = load i64, i64* [[L_CASTED]], align 8
7830 // CHECK10-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
7831 // CHECK10-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP17]] to i8*
7832 // CHECK10-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
7833 // CHECK10-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
7834 // CHECK10-NEXT:    [[TMP27:%.*]] = inttoptr i64 [[TMP19]] to i8*
7835 // CHECK10-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
7836 // CHECK10-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
7837 // CHECK10-NEXT:    [[TMP29:%.*]] = inttoptr i64 [[TMP21]] to i8*
7838 // CHECK10-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
7839 // CHECK10-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
7840 // CHECK10-NEXT:    [[TMP31:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
7841 // CHECK10-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 8
7842 // CHECK10-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
7843 // CHECK10-NEXT:    [[TMP33:%.*]] = inttoptr i64 [[TMP23]] to i8*
7844 // CHECK10-NEXT:    store i8* [[TMP33]], i8** [[TMP32]], align 8
7845 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7846 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
7847 // CHECK10-NEXT:    [[TMP36:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
7848 // CHECK10-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP35]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i64)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP36]], i64 5)
7849 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7850 // CHECK10:       omp.inner.for.inc:
7851 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7852 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7853 // CHECK10-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
7854 // CHECK10-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
7855 // CHECK10-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7856 // CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7857 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
7858 // CHECK10-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
7859 // CHECK10-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7860 // CHECK10-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7861 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP41]], [[TMP42]]
7862 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
7863 // CHECK10-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7864 // CHECK10-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
7865 // CHECK10-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP43]], [[TMP44]]
7866 // CHECK10-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
7867 // CHECK10:       cond.true14:
7868 // CHECK10-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
7869 // CHECK10-NEXT:    br label [[COND_END16:%.*]]
7870 // CHECK10:       cond.false15:
7871 // CHECK10-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7872 // CHECK10-NEXT:    br label [[COND_END16]]
7873 // CHECK10:       cond.end16:
7874 // CHECK10-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP45]], [[COND_TRUE14]] ], [ [[TMP46]], [[COND_FALSE15]] ]
7875 // CHECK10-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
7876 // CHECK10-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7877 // CHECK10-NEXT:    store i32 [[TMP47]], i32* [[DOTOMP_IV]], align 4
7878 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
7879 // CHECK10:       omp.inner.for.end:
7880 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7881 // CHECK10:       omp.loop.exit:
7882 // CHECK10-NEXT:    [[TMP48:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7883 // CHECK10-NEXT:    [[TMP49:%.*]] = load i32, i32* [[TMP48]], align 4
7884 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP49]])
7885 // CHECK10-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7886 // CHECK10-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
7887 // CHECK10-NEXT:    br i1 [[TMP51]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
7888 // CHECK10:       .omp.lastprivate.then:
7889 // CHECK10-NEXT:    [[TMP52:%.*]] = load i32, i32* [[CONV1]], align 8
7890 // CHECK10-NEXT:    store i32 [[TMP52]], i32* [[CONV1]], align 8
7891 // CHECK10-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
7892 // CHECK10:       .omp.lastprivate.done:
7893 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
7894 // CHECK10:       omp.precond.end:
7895 // CHECK10-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
7896 // CHECK10-NEXT:    ret void
7897 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__1
7898 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
7899 // CHECK10-NEXT:  entry:
7900 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7901 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7902 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7903 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7904 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7905 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
7906 // CHECK10-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
7907 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7908 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7909 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7910 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7911 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
7912 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7913 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7914 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7915 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7916 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
7917 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7918 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7919 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7920 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7921 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7922 // CHECK10-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
7923 // CHECK10-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
7924 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7925 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
7926 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
7927 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
7928 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
7929 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7930 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
7931 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7932 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
7933 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7934 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
7935 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7936 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
7937 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7938 // CHECK10:       omp.precond.then:
7939 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7940 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7941 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
7942 // CHECK10-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7943 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
7944 // CHECK10-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7945 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
7946 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
7947 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
7948 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7949 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7950 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7951 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
7952 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
7953 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
7954 // CHECK10:       omp.dispatch.cond:
7955 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7956 // CHECK10-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP9]] to i64
7957 // CHECK10-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7958 // CHECK10-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP10]]
7959 // CHECK10-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7960 // CHECK10:       cond.true:
7961 // CHECK10-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7962 // CHECK10-NEXT:    br label [[COND_END:%.*]]
7963 // CHECK10:       cond.false:
7964 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7965 // CHECK10-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP12]] to i64
7966 // CHECK10-NEXT:    br label [[COND_END]]
7967 // CHECK10:       cond.end:
7968 // CHECK10-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP11]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
7969 // CHECK10-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
7970 // CHECK10-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
7971 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7972 // CHECK10-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
7973 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7974 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7975 // CHECK10-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
7976 // CHECK10-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7977 // CHECK10:       omp.dispatch.body:
7978 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7979 // CHECK10:       omp.inner.for.cond:
7980 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7981 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7982 // CHECK10-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
7983 // CHECK10-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7984 // CHECK10:       omp.inner.for.body:
7985 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7986 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
7987 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7988 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4
7989 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I6]], align 4
7990 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
7991 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
7992 // CHECK10-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
7993 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I6]], align 4
7994 // CHECK10-NEXT:    store i32 [[TMP20]], i32* [[CONV1]], align 8
7995 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7996 // CHECK10:       omp.body.continue:
7997 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7998 // CHECK10:       omp.inner.for.inc:
7999 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8000 // CHECK10-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP21]], 1
8001 // CHECK10-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
8002 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8003 // CHECK10:       omp.inner.for.end:
8004 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
8005 // CHECK10:       omp.dispatch.inc:
8006 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8007 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8008 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
8009 // CHECK10-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_LB]], align 4
8010 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8011 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8012 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
8013 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_UB]], align 4
8014 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
8015 // CHECK10:       omp.dispatch.end:
8016 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8017 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
8018 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
8019 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8020 // CHECK10-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
8021 // CHECK10-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
8022 // CHECK10:       .omp.lastprivate.then:
8023 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[CONV1]], align 8
8024 // CHECK10-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
8025 // CHECK10-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
8026 // CHECK10:       .omp.lastprivate.done:
8027 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
8028 // CHECK10:       omp.precond.end:
8029 // CHECK10-NEXT:    ret void
8030 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
8031 // CHECK10-SAME: (i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
8032 // CHECK10-NEXT:  entry:
8033 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8034 // CHECK10-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
8035 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8036 // CHECK10-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
8037 // CHECK10-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
8038 // CHECK10-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
8039 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8040 // CHECK10-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
8041 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8042 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
8043 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8044 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
8045 // CHECK10-NEXT:    br label [[DOTEXECUTE:%.*]]
8046 // CHECK10:       .execute:
8047 // CHECK10-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
8048 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
8049 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8050 // CHECK10-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
8051 // CHECK10-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
8052 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
8053 // CHECK10-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
8054 // CHECK10-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
8055 // CHECK10:       .omp.deinit:
8056 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
8057 // CHECK10-NEXT:    br label [[DOTEXIT:%.*]]
8058 // CHECK10:       .exit:
8059 // CHECK10-NEXT:    ret void
8060 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__2
8061 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
8062 // CHECK10-NEXT:  entry:
8063 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8064 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8065 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8066 // CHECK10-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
8067 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8068 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8069 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8070 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8071 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8072 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8073 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8074 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8075 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8076 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
8077 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8078 // CHECK10-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
8079 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8080 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8081 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8082 // CHECK10-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
8083 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8084 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
8085 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
8086 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
8087 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8088 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
8089 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8090 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8091 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8092 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
8093 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8094 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
8095 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8096 // CHECK10:       omp.precond.then:
8097 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8098 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8099 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
8100 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8101 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8102 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8103 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8104 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
8105 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
8106 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8107 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8108 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
8109 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8110 // CHECK10:       cond.true:
8111 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8112 // CHECK10-NEXT:    br label [[COND_END:%.*]]
8113 // CHECK10:       cond.false:
8114 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8115 // CHECK10-NEXT:    br label [[COND_END]]
8116 // CHECK10:       cond.end:
8117 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
8118 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8119 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8120 // CHECK10-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
8121 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8122 // CHECK10:       omp.inner.for.cond:
8123 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8124 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8125 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
8126 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
8127 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8128 // CHECK10:       omp.inner.for.body:
8129 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8130 // CHECK10-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
8131 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8132 // CHECK10-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
8133 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
8134 // CHECK10-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8135 // CHECK10-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
8136 // CHECK10-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
8137 // CHECK10-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
8138 // CHECK10-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to i8*
8139 // CHECK10-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 8
8140 // CHECK10-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
8141 // CHECK10-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to i8*
8142 // CHECK10-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
8143 // CHECK10-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
8144 // CHECK10-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to i8*
8145 // CHECK10-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
8146 // CHECK10-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
8147 // CHECK10-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
8148 // CHECK10-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
8149 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8150 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
8151 // CHECK10-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
8152 // CHECK10-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP30]], i64 4)
8153 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8154 // CHECK10:       omp.inner.for.inc:
8155 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8156 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8157 // CHECK10-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
8158 // CHECK10-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
8159 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8160 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8161 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
8162 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
8163 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8164 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8165 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
8166 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
8167 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8168 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8169 // CHECK10-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
8170 // CHECK10-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
8171 // CHECK10:       cond.true11:
8172 // CHECK10-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8173 // CHECK10-NEXT:    br label [[COND_END13:%.*]]
8174 // CHECK10:       cond.false12:
8175 // CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8176 // CHECK10-NEXT:    br label [[COND_END13]]
8177 // CHECK10:       cond.end13:
8178 // CHECK10-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE11]] ], [ [[TMP40]], [[COND_FALSE12]] ]
8179 // CHECK10-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
8180 // CHECK10-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8181 // CHECK10-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV]], align 4
8182 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8183 // CHECK10:       omp.inner.for.end:
8184 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8185 // CHECK10:       omp.loop.exit:
8186 // CHECK10-NEXT:    [[TMP42:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8187 // CHECK10-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP42]], align 4
8188 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP43]])
8189 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
8190 // CHECK10:       omp.precond.end:
8191 // CHECK10-NEXT:    ret void
8192 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__3
8193 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
8194 // CHECK10-NEXT:  entry:
8195 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8196 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8197 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8198 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8199 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8200 // CHECK10-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
8201 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8202 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8203 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8204 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8205 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8206 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8207 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8208 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8209 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8210 // CHECK10-NEXT:    [[I5:%.*]] = alloca i32, align 4
8211 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8212 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8213 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8214 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8215 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8216 // CHECK10-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
8217 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8218 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
8219 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
8220 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
8221 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8222 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
8223 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8224 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8225 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8226 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
8227 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8228 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
8229 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8230 // CHECK10:       omp.precond.then:
8231 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8232 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8233 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
8234 // CHECK10-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8235 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
8236 // CHECK10-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8237 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
8238 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
8239 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
8240 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8241 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8242 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8243 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
8244 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8245 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8246 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
8247 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8248 // CHECK10:       omp.inner.for.cond:
8249 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8250 // CHECK10-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
8251 // CHECK10-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8252 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
8253 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8254 // CHECK10:       omp.inner.for.body:
8255 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8256 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
8257 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8258 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
8259 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I5]], align 4
8260 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
8261 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i64 0, i64 [[IDXPROM]]
8262 // CHECK10-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
8263 // CHECK10-NEXT:    [[CONV8:%.*]] = sext i16 [[TMP14]] to i32
8264 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[CONV8]], 1
8265 // CHECK10-NEXT:    [[CONV10:%.*]] = trunc i32 [[ADD9]] to i16
8266 // CHECK10-NEXT:    store i16 [[CONV10]], i16* [[ARRAYIDX]], align 2
8267 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8268 // CHECK10:       omp.body.continue:
8269 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8270 // CHECK10:       omp.inner.for.inc:
8271 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8272 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8273 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
8274 // CHECK10-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
8275 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8276 // CHECK10:       omp.inner.for.end:
8277 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8278 // CHECK10:       omp.loop.exit:
8279 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8280 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
8281 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
8282 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
8283 // CHECK10:       omp.precond.end:
8284 // CHECK10-NEXT:    ret void
8285 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
8286 // CHECK10-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
8287 // CHECK10-NEXT:  entry:
8288 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8289 // CHECK10-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
8290 // CHECK10-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
8291 // CHECK10-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
8292 // CHECK10-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8293 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8294 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8295 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
8296 // CHECK10-NEXT:    br label [[DOTEXECUTE:%.*]]
8297 // CHECK10:       .execute:
8298 // CHECK10-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
8299 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
8300 // CHECK10-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
8301 // CHECK10-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
8302 // CHECK10:       .omp.deinit:
8303 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
8304 // CHECK10-NEXT:    br label [[DOTEXIT:%.*]]
8305 // CHECK10:       .exit:
8306 // CHECK10-NEXT:    ret void
8307 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__4
8308 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
8309 // CHECK10-NEXT:  entry:
8310 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8311 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8312 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8313 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8314 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8315 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8316 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8317 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8318 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8319 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8320 // CHECK10-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
8321 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8322 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8323 // CHECK10-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8324 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8325 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8326 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
8327 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8328 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8329 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8330 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8331 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
8332 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
8333 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8334 // CHECK10-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
8335 // CHECK10-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8336 // CHECK10:       cond.true:
8337 // CHECK10-NEXT:    br label [[COND_END:%.*]]
8338 // CHECK10:       cond.false:
8339 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8340 // CHECK10-NEXT:    br label [[COND_END]]
8341 // CHECK10:       cond.end:
8342 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
8343 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8344 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8345 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
8346 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8347 // CHECK10:       omp.inner.for.cond:
8348 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8349 // CHECK10-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
8350 // CHECK10-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8351 // CHECK10:       omp.inner.for.body:
8352 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8353 // CHECK10-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
8354 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8355 // CHECK10-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
8356 // CHECK10-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
8357 // CHECK10-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to i8*
8358 // CHECK10-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
8359 // CHECK10-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
8360 // CHECK10-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to i8*
8361 // CHECK10-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
8362 // CHECK10-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
8363 // CHECK10-NEXT:    [[TMP16:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
8364 // CHECK10-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
8365 // CHECK10-NEXT:    [[TMP17:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
8366 // CHECK10-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP17]], i64 3)
8367 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8368 // CHECK10:       omp.inner.for.inc:
8369 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8370 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8371 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
8372 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
8373 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8374 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8375 // CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
8376 // CHECK10-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
8377 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8378 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8379 // CHECK10-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
8380 // CHECK10-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
8381 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8382 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP24]], 9
8383 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
8384 // CHECK10:       cond.true5:
8385 // CHECK10-NEXT:    br label [[COND_END7:%.*]]
8386 // CHECK10:       cond.false6:
8387 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8388 // CHECK10-NEXT:    br label [[COND_END7]]
8389 // CHECK10:       cond.end7:
8390 // CHECK10-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP25]], [[COND_FALSE6]] ]
8391 // CHECK10-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
8392 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8393 // CHECK10-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV]], align 4
8394 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8395 // CHECK10:       omp.inner.for.end:
8396 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8397 // CHECK10:       omp.loop.exit:
8398 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
8399 // CHECK10-NEXT:    ret void
8400 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__5
8401 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
8402 // CHECK10-NEXT:  entry:
8403 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8404 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8405 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8406 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8407 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8408 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8409 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8410 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8411 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8412 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8413 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8414 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8415 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8416 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8417 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8418 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8419 // CHECK10-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8420 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8421 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8422 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
8423 // CHECK10-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8424 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
8425 // CHECK10-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8426 // CHECK10-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
8427 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
8428 // CHECK10-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
8429 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8430 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8431 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8432 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
8433 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8434 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8435 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
8436 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8437 // CHECK10:       omp.inner.for.cond:
8438 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8439 // CHECK10-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
8440 // CHECK10-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8441 // CHECK10-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
8442 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8443 // CHECK10:       omp.inner.for.body:
8444 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8445 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
8446 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8447 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
8448 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
8449 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
8450 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
8451 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
8452 // CHECK10-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
8453 // CHECK10-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
8454 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8455 // CHECK10:       omp.body.continue:
8456 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8457 // CHECK10:       omp.inner.for.inc:
8458 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8459 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8460 // CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
8461 // CHECK10-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
8462 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8463 // CHECK10:       omp.inner.for.end:
8464 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8465 // CHECK10:       omp.loop.exit:
8466 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
8467 // CHECK10-NEXT:    ret void
8468 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
8469 // CHECK10-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
8470 // CHECK10-NEXT:  entry:
8471 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
8472 // CHECK10-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
8473 // CHECK10-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
8474 // CHECK10-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
8475 // CHECK10-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
8476 // CHECK10-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
8477 // CHECK10-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
8478 // CHECK10-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
8479 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
8480 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
8481 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8482 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
8483 // CHECK10-NEXT:    br label [[DOTEXECUTE:%.*]]
8484 // CHECK10:       .execute:
8485 // CHECK10-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
8486 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
8487 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[F_CASTED]] to i32*
8488 // CHECK10-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
8489 // CHECK10-NEXT:    [[TMP3:%.*]] = load i64, i64* [[F_CASTED]], align 8
8490 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
8491 // CHECK10-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i64 [[TMP3]]) #[[ATTR3]]
8492 // CHECK10-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
8493 // CHECK10:       .omp.deinit:
8494 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
8495 // CHECK10-NEXT:    br label [[DOTEXIT:%.*]]
8496 // CHECK10:       .exit:
8497 // CHECK10-NEXT:    ret void
8498 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__6
8499 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
8500 // CHECK10-NEXT:  entry:
8501 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8502 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8503 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
8504 // CHECK10-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
8505 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8506 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8507 // CHECK10-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
8508 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8509 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8510 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8511 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8512 // CHECK10-NEXT:    [[K:%.*]] = alloca i32, align 4
8513 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8514 // CHECK10-NEXT:    [[J:%.*]] = alloca i32, align 4
8515 // CHECK10-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
8516 // CHECK10-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
8517 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8518 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8519 // CHECK10-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
8520 // CHECK10-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
8521 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
8522 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
8523 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8524 // CHECK10-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
8525 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8526 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8527 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8528 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8529 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
8530 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
8531 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8532 // CHECK10-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
8533 // CHECK10-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8534 // CHECK10:       cond.true:
8535 // CHECK10-NEXT:    br label [[COND_END:%.*]]
8536 // CHECK10:       cond.false:
8537 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8538 // CHECK10-NEXT:    br label [[COND_END]]
8539 // CHECK10:       cond.end:
8540 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
8541 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8542 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8543 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
8544 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8545 // CHECK10:       omp.inner.for.cond:
8546 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8547 // CHECK10-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
8548 // CHECK10-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8549 // CHECK10:       omp.inner.for.body:
8550 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8551 // CHECK10-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
8552 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8553 // CHECK10-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
8554 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[CONV]], align 8
8555 // CHECK10-NEXT:    [[CONV3:%.*]] = bitcast i64* [[F_CASTED]] to i32*
8556 // CHECK10-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
8557 // CHECK10-NEXT:    [[TMP12:%.*]] = load i64, i64* [[F_CASTED]], align 8
8558 // CHECK10-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
8559 // CHECK10-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to i8*
8560 // CHECK10-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
8561 // CHECK10-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
8562 // CHECK10-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to i8*
8563 // CHECK10-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
8564 // CHECK10-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
8565 // CHECK10-NEXT:    [[TMP18:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
8566 // CHECK10-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
8567 // CHECK10-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
8568 // CHECK10-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP12]] to i8*
8569 // CHECK10-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
8570 // CHECK10-NEXT:    [[TMP21:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
8571 // CHECK10-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x [10 x i32]]*, i64)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP21]], i64 4)
8572 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8573 // CHECK10:       omp.inner.for.inc:
8574 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8575 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8576 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
8577 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
8578 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8579 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8580 // CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
8581 // CHECK10-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_LB]], align 4
8582 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8583 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8584 // CHECK10-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
8585 // CHECK10-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_COMB_UB]], align 4
8586 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8587 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP28]], 99
8588 // CHECK10-NEXT:    br i1 [[CMP6]], label [[COND_TRUE7:%.*]], label [[COND_FALSE8:%.*]]
8589 // CHECK10:       cond.true7:
8590 // CHECK10-NEXT:    br label [[COND_END9:%.*]]
8591 // CHECK10:       cond.false8:
8592 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8593 // CHECK10-NEXT:    br label [[COND_END9]]
8594 // CHECK10:       cond.end9:
8595 // CHECK10-NEXT:    [[COND10:%.*]] = phi i32 [ 99, [[COND_TRUE7]] ], [ [[TMP29]], [[COND_FALSE8]] ]
8596 // CHECK10-NEXT:    store i32 [[COND10]], i32* [[DOTOMP_COMB_UB]], align 4
8597 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8598 // CHECK10-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV]], align 4
8599 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8600 // CHECK10:       omp.inner.for.end:
8601 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8602 // CHECK10:       omp.loop.exit:
8603 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
8604 // CHECK10-NEXT:    ret void
8605 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__7
8606 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
8607 // CHECK10-NEXT:  entry:
8608 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8609 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8610 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8611 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8612 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
8613 // CHECK10-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
8614 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8615 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8616 // CHECK10-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
8617 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8618 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8619 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8620 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8621 // CHECK10-NEXT:    [[K:%.*]] = alloca i32, align 4
8622 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8623 // CHECK10-NEXT:    [[J:%.*]] = alloca i32, align 4
8624 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8625 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8626 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8627 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8628 // CHECK10-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
8629 // CHECK10-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
8630 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
8631 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
8632 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8633 // CHECK10-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
8634 // CHECK10-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8635 // CHECK10-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32
8636 // CHECK10-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8637 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP2]] to i32
8638 // CHECK10-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
8639 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
8640 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8641 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8642 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8643 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
8644 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8645 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8646 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
8647 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8648 // CHECK10:       omp.inner.for.cond:
8649 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8650 // CHECK10-NEXT:    [[CONV4:%.*]] = sext i32 [[TMP6]] to i64
8651 // CHECK10-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8652 // CHECK10-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV4]], [[TMP7]]
8653 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8654 // CHECK10:       omp.inner.for.body:
8655 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8656 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
8657 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
8658 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8659 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
8660 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8661 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8662 // CHECK10-NEXT:    [[DIV5:%.*]] = sdiv i32 [[TMP10]], 10
8663 // CHECK10-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 10
8664 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL6]]
8665 // CHECK10-NEXT:    [[MUL7:%.*]] = mul nsw i32 [[SUB]], 1
8666 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL7]]
8667 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[J]], align 4
8668 // CHECK10-NEXT:    store i32 10, i32* [[K]], align 4
8669 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
8670 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
8671 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8
8672 // CHECK10-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
8673 // CHECK10-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP11]], [[MUL9]]
8674 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
8675 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD10]], [[TMP14]]
8676 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
8677 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
8678 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
8679 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
8680 // CHECK10-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP16]] to i64
8681 // CHECK10-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM12]]
8682 // CHECK10-NEXT:    store i32 [[ADD11]], i32* [[ARRAYIDX13]], align 4
8683 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8684 // CHECK10:       omp.body.continue:
8685 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8686 // CHECK10:       omp.inner.for.inc:
8687 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8688 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8689 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
8690 // CHECK10-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
8691 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8692 // CHECK10:       omp.inner.for.end:
8693 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8694 // CHECK10:       omp.loop.exit:
8695 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
8696 // CHECK10-NEXT:    ret void
8697 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
8698 // CHECK10-SAME: (i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
8699 // CHECK10-NEXT:  entry:
8700 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8701 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
8702 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8703 // CHECK10-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
8704 // CHECK10-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
8705 // CHECK10-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
8706 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8707 // CHECK10-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
8708 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8709 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
8710 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8711 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
8712 // CHECK10-NEXT:    br label [[DOTEXECUTE:%.*]]
8713 // CHECK10:       .execute:
8714 // CHECK10-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
8715 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
8716 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8717 // CHECK10-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
8718 // CHECK10-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
8719 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
8720 // CHECK10-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
8721 // CHECK10-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
8722 // CHECK10:       .omp.deinit:
8723 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
8724 // CHECK10-NEXT:    br label [[DOTEXIT:%.*]]
8725 // CHECK10:       .exit:
8726 // CHECK10-NEXT:    ret void
8727 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__8
8728 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
8729 // CHECK10-NEXT:  entry:
8730 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8731 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8732 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8733 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
8734 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
8735 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8736 // CHECK10-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
8737 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8738 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8739 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
8740 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8741 // CHECK10-NEXT:    [[J:%.*]] = alloca i32, align 4
8742 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
8743 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
8744 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
8745 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8746 // CHECK10-NEXT:    [[I10:%.*]] = alloca i32, align 4
8747 // CHECK10-NEXT:    [[J11:%.*]] = alloca i32, align 4
8748 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8749 // CHECK10-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
8750 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8751 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8752 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8753 // CHECK10-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
8754 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8755 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
8756 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
8757 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
8758 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
8759 // CHECK10-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8760 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8761 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
8762 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8763 // CHECK10-NEXT:    [[CONV4:%.*]] = sext i32 [[DIV]] to i64
8764 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8765 // CHECK10-NEXT:    [[SUB5:%.*]] = sub nsw i32 [[TMP4]], 0
8766 // CHECK10-NEXT:    [[DIV6:%.*]] = sdiv i32 [[SUB5]], 1
8767 // CHECK10-NEXT:    [[CONV7:%.*]] = sext i32 [[DIV6]] to i64
8768 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV4]], [[CONV7]]
8769 // CHECK10-NEXT:    [[SUB8:%.*]] = sub nsw i64 [[MUL]], 1
8770 // CHECK10-NEXT:    store i64 [[SUB8]], i64* [[DOTCAPTURE_EXPR_3]], align 8
8771 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
8772 // CHECK10-NEXT:    store i32 0, i32* [[J]], align 4
8773 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8774 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8775 // CHECK10-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
8776 // CHECK10:       land.lhs.true:
8777 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8778 // CHECK10-NEXT:    [[CMP9:%.*]] = icmp slt i32 0, [[TMP6]]
8779 // CHECK10-NEXT:    br i1 [[CMP9]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
8780 // CHECK10:       omp.precond.then:
8781 // CHECK10-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
8782 // CHECK10-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
8783 // CHECK10-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
8784 // CHECK10-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
8785 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8786 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
8787 // CHECK10-NEXT:    [[CONV12:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
8788 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8789 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8790 // CHECK10-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV12]])
8791 // CHECK10-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
8792 // CHECK10-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
8793 // CHECK10-NEXT:    [[CMP13:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
8794 // CHECK10-NEXT:    br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8795 // CHECK10:       cond.true:
8796 // CHECK10-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
8797 // CHECK10-NEXT:    br label [[COND_END:%.*]]
8798 // CHECK10:       cond.false:
8799 // CHECK10-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
8800 // CHECK10-NEXT:    br label [[COND_END]]
8801 // CHECK10:       cond.end:
8802 // CHECK10-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8803 // CHECK10-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
8804 // CHECK10-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
8805 // CHECK10-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
8806 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8807 // CHECK10:       omp.inner.for.cond:
8808 // CHECK10-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
8809 // CHECK10-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
8810 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
8811 // CHECK10-NEXT:    [[CMP14:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
8812 // CHECK10-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8813 // CHECK10:       omp.inner.for.body:
8814 // CHECK10-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
8815 // CHECK10-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
8816 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
8817 // CHECK10-NEXT:    [[CONV15:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8818 // CHECK10-NEXT:    store i32 [[TMP19]], i32* [[CONV15]], align 4
8819 // CHECK10-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
8820 // CHECK10-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
8821 // CHECK10-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP17]] to i8*
8822 // CHECK10-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
8823 // CHECK10-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
8824 // CHECK10-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to i8*
8825 // CHECK10-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
8826 // CHECK10-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
8827 // CHECK10-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to i8*
8828 // CHECK10-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
8829 // CHECK10-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
8830 // CHECK10-NEXT:    [[TMP28:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
8831 // CHECK10-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
8832 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8833 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8834 // CHECK10-NEXT:    [[TMP31:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
8835 // CHECK10-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP31]], i64 4)
8836 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8837 // CHECK10:       omp.inner.for.inc:
8838 // CHECK10-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
8839 // CHECK10-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
8840 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP32]], [[TMP33]]
8841 // CHECK10-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_IV]], align 8
8842 // CHECK10-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
8843 // CHECK10-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
8844 // CHECK10-NEXT:    [[ADD17:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
8845 // CHECK10-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_COMB_LB]], align 8
8846 // CHECK10-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
8847 // CHECK10-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
8848 // CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
8849 // CHECK10-NEXT:    store i64 [[ADD18]], i64* [[DOTOMP_COMB_UB]], align 8
8850 // CHECK10-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
8851 // CHECK10-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
8852 // CHECK10-NEXT:    [[CMP19:%.*]] = icmp sgt i64 [[TMP38]], [[TMP39]]
8853 // CHECK10-NEXT:    br i1 [[CMP19]], label [[COND_TRUE20:%.*]], label [[COND_FALSE21:%.*]]
8854 // CHECK10:       cond.true20:
8855 // CHECK10-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
8856 // CHECK10-NEXT:    br label [[COND_END22:%.*]]
8857 // CHECK10:       cond.false21:
8858 // CHECK10-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
8859 // CHECK10-NEXT:    br label [[COND_END22]]
8860 // CHECK10:       cond.end22:
8861 // CHECK10-NEXT:    [[COND23:%.*]] = phi i64 [ [[TMP40]], [[COND_TRUE20]] ], [ [[TMP41]], [[COND_FALSE21]] ]
8862 // CHECK10-NEXT:    store i64 [[COND23]], i64* [[DOTOMP_COMB_UB]], align 8
8863 // CHECK10-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
8864 // CHECK10-NEXT:    store i64 [[TMP42]], i64* [[DOTOMP_IV]], align 8
8865 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
8866 // CHECK10:       omp.inner.for.end:
8867 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8868 // CHECK10:       omp.loop.exit:
8869 // CHECK10-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8870 // CHECK10-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
8871 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
8872 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
8873 // CHECK10:       omp.precond.end:
8874 // CHECK10-NEXT:    ret void
8875 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__9
8876 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
8877 // CHECK10-NEXT:  entry:
8878 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8879 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8880 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8881 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8882 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8883 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
8884 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
8885 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8886 // CHECK10-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
8887 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8888 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8889 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
8890 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
8891 // CHECK10-NEXT:    [[J:%.*]] = alloca i32, align 4
8892 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
8893 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
8894 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
8895 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8896 // CHECK10-NEXT:    [[I10:%.*]] = alloca i32, align 4
8897 // CHECK10-NEXT:    [[J11:%.*]] = alloca i32, align 4
8898 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8899 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8900 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8901 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8902 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8903 // CHECK10-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
8904 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8905 // CHECK10-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
8906 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
8907 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
8908 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
8909 // CHECK10-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8910 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8911 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
8912 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8913 // CHECK10-NEXT:    [[CONV4:%.*]] = sext i32 [[DIV]] to i64
8914 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8915 // CHECK10-NEXT:    [[SUB5:%.*]] = sub nsw i32 [[TMP4]], 0
8916 // CHECK10-NEXT:    [[DIV6:%.*]] = sdiv i32 [[SUB5]], 1
8917 // CHECK10-NEXT:    [[CONV7:%.*]] = sext i32 [[DIV6]] to i64
8918 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV4]], [[CONV7]]
8919 // CHECK10-NEXT:    [[SUB8:%.*]] = sub nsw i64 [[MUL]], 1
8920 // CHECK10-NEXT:    store i64 [[SUB8]], i64* [[DOTCAPTURE_EXPR_3]], align 8
8921 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
8922 // CHECK10-NEXT:    store i32 0, i32* [[J]], align 4
8923 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8924 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8925 // CHECK10-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
8926 // CHECK10:       land.lhs.true:
8927 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8928 // CHECK10-NEXT:    [[CMP9:%.*]] = icmp slt i32 0, [[TMP6]]
8929 // CHECK10-NEXT:    br i1 [[CMP9]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
8930 // CHECK10:       omp.precond.then:
8931 // CHECK10-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
8932 // CHECK10-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
8933 // CHECK10-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
8934 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8935 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8936 // CHECK10-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_LB]], align 8
8937 // CHECK10-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
8938 // CHECK10-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
8939 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8940 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8941 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8942 // CHECK10-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
8943 // CHECK10-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
8944 // CHECK10-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
8945 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8946 // CHECK10:       omp.inner.for.cond:
8947 // CHECK10-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
8948 // CHECK10-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8949 // CHECK10-NEXT:    [[CMP12:%.*]] = icmp ule i64 [[TMP13]], [[TMP14]]
8950 // CHECK10-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8951 // CHECK10:       omp.inner.for.body:
8952 // CHECK10-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
8953 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8954 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP16]], 0
8955 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8956 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 1, [[DIV14]]
8957 // CHECK10-NEXT:    [[CONV16:%.*]] = sext i32 [[MUL15]] to i64
8958 // CHECK10-NEXT:    [[DIV17:%.*]] = sdiv i64 [[TMP15]], [[CONV16]]
8959 // CHECK10-NEXT:    [[MUL18:%.*]] = mul nsw i64 [[DIV17]], 1
8960 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL18]]
8961 // CHECK10-NEXT:    [[CONV19:%.*]] = trunc i64 [[ADD]] to i32
8962 // CHECK10-NEXT:    store i32 [[CONV19]], i32* [[I10]], align 4
8963 // CHECK10-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
8964 // CHECK10-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
8965 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8966 // CHECK10-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP19]], 0
8967 // CHECK10-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
8968 // CHECK10-NEXT:    [[MUL22:%.*]] = mul nsw i32 1, [[DIV21]]
8969 // CHECK10-NEXT:    [[CONV23:%.*]] = sext i32 [[MUL22]] to i64
8970 // CHECK10-NEXT:    [[DIV24:%.*]] = sdiv i64 [[TMP18]], [[CONV23]]
8971 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8972 // CHECK10-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[TMP20]], 0
8973 // CHECK10-NEXT:    [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1
8974 // CHECK10-NEXT:    [[MUL27:%.*]] = mul nsw i32 1, [[DIV26]]
8975 // CHECK10-NEXT:    [[CONV28:%.*]] = sext i32 [[MUL27]] to i64
8976 // CHECK10-NEXT:    [[MUL29:%.*]] = mul nsw i64 [[DIV24]], [[CONV28]]
8977 // CHECK10-NEXT:    [[SUB30:%.*]] = sub nsw i64 [[TMP17]], [[MUL29]]
8978 // CHECK10-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[SUB30]], 1
8979 // CHECK10-NEXT:    [[ADD32:%.*]] = add nsw i64 0, [[MUL31]]
8980 // CHECK10-NEXT:    [[CONV33:%.*]] = trunc i64 [[ADD32]] to i32
8981 // CHECK10-NEXT:    store i32 [[CONV33]], i32* [[J11]], align 4
8982 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I10]], align 4
8983 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J11]], align 4
8984 // CHECK10-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
8985 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I10]], align 4
8986 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
8987 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
8988 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J11]], align 4
8989 // CHECK10-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP24]] to i64
8990 // CHECK10-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM35]]
8991 // CHECK10-NEXT:    store i32 [[ADD34]], i32* [[ARRAYIDX36]], align 4
8992 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8993 // CHECK10:       omp.body.continue:
8994 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8995 // CHECK10:       omp.inner.for.inc:
8996 // CHECK10-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
8997 // CHECK10-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
8998 // CHECK10-NEXT:    [[ADD37:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
8999 // CHECK10-NEXT:    store i64 [[ADD37]], i64* [[DOTOMP_IV]], align 8
9000 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
9001 // CHECK10:       omp.inner.for.end:
9002 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9003 // CHECK10:       omp.loop.exit:
9004 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9005 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
9006 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
9007 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
9008 // CHECK10:       omp.precond.end:
9009 // CHECK10-NEXT:    ret void
9010 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
9011 // CHECK10-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
9012 // CHECK10-NEXT:  entry:
9013 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9014 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
9015 // CHECK10-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
9016 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9017 // CHECK10-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
9018 // CHECK10-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
9019 // CHECK10-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
9020 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9021 // CHECK10-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
9022 // CHECK10-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
9023 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9024 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
9025 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
9026 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
9027 // CHECK10-NEXT:    br label [[DOTEXECUTE:%.*]]
9028 // CHECK10:       .execute:
9029 // CHECK10-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
9030 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9031 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9032 // CHECK10-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
9033 // CHECK10-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
9034 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 8
9035 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
9036 // CHECK10-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
9037 // CHECK10-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
9038 // CHECK10:       .omp.deinit:
9039 // CHECK10-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
9040 // CHECK10-NEXT:    br label [[DOTEXIT:%.*]]
9041 // CHECK10:       .exit:
9042 // CHECK10-NEXT:    ret void
9043 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__10
9044 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
9045 // CHECK10-NEXT:  entry:
9046 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9047 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9048 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9049 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
9050 // CHECK10-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
9051 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9052 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9053 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9054 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9055 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
9056 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9057 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9058 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9059 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9060 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
9061 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9062 // CHECK10-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
9063 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9064 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9065 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9066 // CHECK10-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
9067 // CHECK10-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
9068 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9069 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
9070 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
9071 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
9072 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9073 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
9074 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9075 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9076 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9077 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
9078 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9079 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
9080 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9081 // CHECK10:       omp.precond.then:
9082 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9083 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9084 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
9085 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9086 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9087 // CHECK10-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
9088 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9089 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
9090 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
9091 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9092 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9093 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
9094 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9095 // CHECK10:       cond.true:
9096 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9097 // CHECK10-NEXT:    br label [[COND_END:%.*]]
9098 // CHECK10:       cond.false:
9099 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9100 // CHECK10-NEXT:    br label [[COND_END]]
9101 // CHECK10:       cond.end:
9102 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
9103 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9104 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9105 // CHECK10-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
9106 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9107 // CHECK10:       omp.inner.for.cond:
9108 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9109 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9110 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
9111 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
9112 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9113 // CHECK10:       omp.inner.for.body:
9114 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9115 // CHECK10-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
9116 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9117 // CHECK10-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
9118 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
9119 // CHECK10-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9120 // CHECK10-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
9121 // CHECK10-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
9122 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[V_ADDR]], align 8
9123 // CHECK10-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
9124 // CHECK10-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to i8*
9125 // CHECK10-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
9126 // CHECK10-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
9127 // CHECK10-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to i8*
9128 // CHECK10-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
9129 // CHECK10-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
9130 // CHECK10-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to i8*
9131 // CHECK10-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
9132 // CHECK10-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
9133 // CHECK10-NEXT:    [[TMP28:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
9134 // CHECK10-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
9135 // CHECK10-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
9136 // CHECK10-NEXT:    [[TMP30:%.*]] = bitcast i32* [[TMP20]] to i8*
9137 // CHECK10-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
9138 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9139 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
9140 // CHECK10-NEXT:    [[TMP33:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
9141 // CHECK10-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP33]], i64 5)
9142 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9143 // CHECK10:       omp.inner.for.inc:
9144 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9145 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9146 // CHECK10-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
9147 // CHECK10-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
9148 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9149 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9150 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
9151 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
9152 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9153 // CHECK10-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9154 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
9155 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
9156 // CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9157 // CHECK10-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9158 // CHECK10-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
9159 // CHECK10-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
9160 // CHECK10:       cond.true11:
9161 // CHECK10-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9162 // CHECK10-NEXT:    br label [[COND_END13:%.*]]
9163 // CHECK10:       cond.false12:
9164 // CHECK10-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9165 // CHECK10-NEXT:    br label [[COND_END13]]
9166 // CHECK10:       cond.end13:
9167 // CHECK10-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE11]] ], [ [[TMP43]], [[COND_FALSE12]] ]
9168 // CHECK10-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
9169 // CHECK10-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9170 // CHECK10-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
9171 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
9172 // CHECK10:       omp.inner.for.end:
9173 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9174 // CHECK10:       omp.loop.exit:
9175 // CHECK10-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9176 // CHECK10-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
9177 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
9178 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
9179 // CHECK10:       omp.precond.end:
9180 // CHECK10-NEXT:    ret void
9181 // CHECK10-LABEL: define {{[^@]+}}@__omp_outlined__11
9182 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
9183 // CHECK10-NEXT:  entry:
9184 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9185 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9186 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9187 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9188 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9189 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
9190 // CHECK10-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
9191 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9192 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9193 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9194 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9195 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
9196 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9197 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9198 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9199 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9200 // CHECK10-NEXT:    [[I5:%.*]] = alloca i32, align 4
9201 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9202 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9203 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9204 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9205 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9206 // CHECK10-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
9207 // CHECK10-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
9208 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9209 // CHECK10-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
9210 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
9211 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
9212 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9213 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
9214 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9215 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9216 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9217 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
9218 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9219 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
9220 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9221 // CHECK10:       omp.precond.then:
9222 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9223 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9224 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
9225 // CHECK10-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9226 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
9227 // CHECK10-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9228 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
9229 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
9230 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
9231 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9232 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9233 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9234 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
9235 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9236 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9237 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
9238 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9239 // CHECK10:       omp.inner.for.cond:
9240 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9241 // CHECK10-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
9242 // CHECK10-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9243 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
9244 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9245 // CHECK10:       omp.inner.for.body:
9246 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9247 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
9248 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9249 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
9250 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 8
9251 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I5]], align 4
9252 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
9253 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 [[IDXPROM]]
9254 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
9255 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I5]], align 4
9256 // CHECK10-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP16]] to i64
9257 // CHECK10-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM8]]
9258 // CHECK10-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX9]], align 4
9259 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9260 // CHECK10:       omp.body.continue:
9261 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9262 // CHECK10:       omp.inner.for.inc:
9263 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9264 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9265 // CHECK10-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
9266 // CHECK10-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
9267 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]]
9268 // CHECK10:       omp.inner.for.end:
9269 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9270 // CHECK10:       omp.loop.exit:
9271 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9272 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
9273 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
9274 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
9275 // CHECK10:       omp.precond.end:
9276 // CHECK10-NEXT:    ret void
9277 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
9278 // CHECK11-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
9279 // CHECK11-NEXT:  entry:
9280 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9281 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
9282 // CHECK11-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
9283 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9284 // CHECK11-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
9285 // CHECK11-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
9286 // CHECK11-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
9287 // CHECK11-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
9288 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9289 // CHECK11-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
9290 // CHECK11-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
9291 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9292 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
9293 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
9294 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
9295 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
9296 // CHECK11-NEXT:    br label [[DOTEXECUTE:%.*]]
9297 // CHECK11:       .execute:
9298 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
9299 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9300 // CHECK11-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9301 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
9302 // CHECK11-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
9303 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
9304 // CHECK11-NEXT:    [[CONV3:%.*]] = bitcast i64* [[L_CASTED]] to i32*
9305 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
9306 // CHECK11-NEXT:    [[TMP5:%.*]] = load i64, i64* [[L_CASTED]], align 8
9307 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
9308 // CHECK11-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i64 [[TMP5]]) #[[ATTR3:[0-9]+]]
9309 // CHECK11-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
9310 // CHECK11:       .omp.deinit:
9311 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
9312 // CHECK11-NEXT:    br label [[DOTEXIT:%.*]]
9313 // CHECK11:       .exit:
9314 // CHECK11-NEXT:    ret void
9315 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__
9316 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
9317 // CHECK11-NEXT:  entry:
9318 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9319 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9320 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9321 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
9322 // CHECK11-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
9323 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9324 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9325 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9326 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
9327 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
9328 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9329 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9330 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9331 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9332 // CHECK11-NEXT:    [[I5:%.*]] = alloca i32, align 4
9333 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9334 // CHECK11-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
9335 // CHECK11-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
9336 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9337 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9338 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9339 // CHECK11-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
9340 // CHECK11-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
9341 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9342 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
9343 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
9344 // CHECK11-NEXT:    [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
9345 // CHECK11-NEXT:    [[TMP2:%.*]] = load i64, i64* @"_openmp_static_kernel$size", align 8
9346 // CHECK11-NEXT:    call void @__kmpc_get_team_static_memory(i16 1, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i64 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
9347 // CHECK11-NEXT:    [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 8
9348 // CHECK11-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i64 0
9349 // CHECK11-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
9350 // CHECK11-NEXT:    [[L2:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
9351 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV]], align 8
9352 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
9353 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9354 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
9355 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9356 // CHECK11-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
9357 // CHECK11-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
9358 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
9359 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9360 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
9361 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9362 // CHECK11:       omp.precond.then:
9363 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9364 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9365 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
9366 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9367 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9368 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9369 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
9370 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
9371 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9372 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9373 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
9374 // CHECK11-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9375 // CHECK11:       cond.true:
9376 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9377 // CHECK11-NEXT:    br label [[COND_END:%.*]]
9378 // CHECK11:       cond.false:
9379 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9380 // CHECK11-NEXT:    br label [[COND_END]]
9381 // CHECK11:       cond.end:
9382 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
9383 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9384 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9385 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
9386 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9387 // CHECK11:       omp.inner.for.cond:
9388 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9389 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9390 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
9391 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
9392 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9393 // CHECK11:       omp.inner.for.body:
9394 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9395 // CHECK11-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9396 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9397 // CHECK11-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
9398 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[CONV]], align 8
9399 // CHECK11-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9400 // CHECK11-NEXT:    store i32 [[TMP23]], i32* [[CONV8]], align 4
9401 // CHECK11-NEXT:    [[TMP24:%.*]] = load i64, i64* [[N_CASTED]], align 8
9402 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV1]], align 8
9403 // CHECK11-NEXT:    [[CONV9:%.*]] = bitcast i64* [[L_CASTED]] to i32*
9404 // CHECK11-NEXT:    store i32 [[TMP25]], i32* [[CONV9]], align 4
9405 // CHECK11-NEXT:    [[TMP26:%.*]] = load i64, i64* [[L_CASTED]], align 8
9406 // CHECK11-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
9407 // CHECK11-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP20]] to i8*
9408 // CHECK11-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
9409 // CHECK11-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
9410 // CHECK11-NEXT:    [[TMP30:%.*]] = inttoptr i64 [[TMP22]] to i8*
9411 // CHECK11-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
9412 // CHECK11-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
9413 // CHECK11-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP24]] to i8*
9414 // CHECK11-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 8
9415 // CHECK11-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
9416 // CHECK11-NEXT:    [[TMP34:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
9417 // CHECK11-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 8
9418 // CHECK11-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
9419 // CHECK11-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP26]] to i8*
9420 // CHECK11-NEXT:    store i8* [[TMP36]], i8** [[TMP35]], align 8
9421 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9422 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP37]], align 4
9423 // CHECK11-NEXT:    [[TMP39:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
9424 // CHECK11-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP38]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i64)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP39]], i64 5)
9425 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9426 // CHECK11:       omp.inner.for.inc:
9427 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9428 // CHECK11-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9429 // CHECK11-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
9430 // CHECK11-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
9431 // CHECK11-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9432 // CHECK11-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9433 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
9434 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
9435 // CHECK11-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9436 // CHECK11-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9437 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP44]], [[TMP45]]
9438 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
9439 // CHECK11-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9440 // CHECK11-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9441 // CHECK11-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP46]], [[TMP47]]
9442 // CHECK11-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
9443 // CHECK11:       cond.true14:
9444 // CHECK11-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
9445 // CHECK11-NEXT:    br label [[COND_END16:%.*]]
9446 // CHECK11:       cond.false15:
9447 // CHECK11-NEXT:    [[TMP49:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9448 // CHECK11-NEXT:    br label [[COND_END16]]
9449 // CHECK11:       cond.end16:
9450 // CHECK11-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP48]], [[COND_TRUE14]] ], [ [[TMP49]], [[COND_FALSE15]] ]
9451 // CHECK11-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
9452 // CHECK11-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9453 // CHECK11-NEXT:    store i32 [[TMP50]], i32* [[DOTOMP_IV]], align 4
9454 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
9455 // CHECK11:       omp.inner.for.end:
9456 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9457 // CHECK11:       omp.loop.exit:
9458 // CHECK11-NEXT:    [[TMP51:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9459 // CHECK11-NEXT:    [[TMP52:%.*]] = load i32, i32* [[TMP51]], align 4
9460 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP52]])
9461 // CHECK11-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9462 // CHECK11-NEXT:    [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
9463 // CHECK11-NEXT:    br i1 [[TMP54]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
9464 // CHECK11:       .omp.lastprivate.then:
9465 // CHECK11-NEXT:    [[TMP55:%.*]] = load i32, i32* [[CONV1]], align 8
9466 // CHECK11-NEXT:    store i32 [[TMP55]], i32* [[CONV1]], align 8
9467 // CHECK11-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
9468 // CHECK11:       .omp.lastprivate.done:
9469 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
9470 // CHECK11:       omp.precond.end:
9471 // CHECK11-NEXT:    [[TMP56:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
9472 // CHECK11-NEXT:    call void @__kmpc_restore_team_static_memory(i16 1, i16 [[TMP56]])
9473 // CHECK11-NEXT:    ret void
9474 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__1
9475 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
9476 // CHECK11-NEXT:  entry:
9477 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9478 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9479 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9480 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9481 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9482 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
9483 // CHECK11-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
9484 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9485 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9486 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9487 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9488 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
9489 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9490 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9491 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9492 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9493 // CHECK11-NEXT:    [[I6:%.*]] = alloca i32, align 4
9494 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9495 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9496 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9497 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9498 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9499 // CHECK11-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
9500 // CHECK11-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
9501 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9502 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
9503 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
9504 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
9505 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
9506 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9507 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
9508 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9509 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9510 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9511 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
9512 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9513 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
9514 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9515 // CHECK11:       omp.precond.then:
9516 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9517 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9518 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
9519 // CHECK11-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9520 // CHECK11-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
9521 // CHECK11-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9522 // CHECK11-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
9523 // CHECK11-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
9524 // CHECK11-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
9525 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9526 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9527 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9528 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
9529 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
9530 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
9531 // CHECK11:       omp.dispatch.cond:
9532 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9533 // CHECK11-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP9]] to i64
9534 // CHECK11-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9535 // CHECK11-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP10]]
9536 // CHECK11-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9537 // CHECK11:       cond.true:
9538 // CHECK11-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9539 // CHECK11-NEXT:    br label [[COND_END:%.*]]
9540 // CHECK11:       cond.false:
9541 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9542 // CHECK11-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP12]] to i64
9543 // CHECK11-NEXT:    br label [[COND_END]]
9544 // CHECK11:       cond.end:
9545 // CHECK11-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP11]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
9546 // CHECK11-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
9547 // CHECK11-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
9548 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9549 // CHECK11-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
9550 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9551 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9552 // CHECK11-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
9553 // CHECK11-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9554 // CHECK11:       omp.dispatch.body:
9555 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9556 // CHECK11:       omp.inner.for.cond:
9557 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9558 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9559 // CHECK11-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
9560 // CHECK11-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9561 // CHECK11:       omp.inner.for.body:
9562 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9563 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
9564 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9565 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4
9566 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I6]], align 4
9567 // CHECK11-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
9568 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
9569 // CHECK11-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
9570 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I6]], align 4
9571 // CHECK11-NEXT:    store i32 [[TMP20]], i32* [[CONV1]], align 8
9572 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9573 // CHECK11:       omp.body.continue:
9574 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9575 // CHECK11:       omp.inner.for.inc:
9576 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9577 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP21]], 1
9578 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
9579 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
9580 // CHECK11:       omp.inner.for.end:
9581 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
9582 // CHECK11:       omp.dispatch.inc:
9583 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9584 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9585 // CHECK11-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
9586 // CHECK11-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_LB]], align 4
9587 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9588 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9589 // CHECK11-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
9590 // CHECK11-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_UB]], align 4
9591 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
9592 // CHECK11:       omp.dispatch.end:
9593 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9594 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
9595 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
9596 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9597 // CHECK11-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
9598 // CHECK11-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
9599 // CHECK11:       .omp.lastprivate.then:
9600 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[CONV1]], align 8
9601 // CHECK11-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
9602 // CHECK11-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
9603 // CHECK11:       .omp.lastprivate.done:
9604 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
9605 // CHECK11:       omp.precond.end:
9606 // CHECK11-NEXT:    ret void
9607 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
9608 // CHECK11-SAME: (i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
9609 // CHECK11-NEXT:  entry:
9610 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9611 // CHECK11-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
9612 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9613 // CHECK11-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
9614 // CHECK11-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
9615 // CHECK11-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
9616 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9617 // CHECK11-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
9618 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9619 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
9620 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
9621 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
9622 // CHECK11-NEXT:    br label [[DOTEXECUTE:%.*]]
9623 // CHECK11:       .execute:
9624 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
9625 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
9626 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9627 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
9628 // CHECK11-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
9629 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
9630 // CHECK11-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
9631 // CHECK11-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
9632 // CHECK11:       .omp.deinit:
9633 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
9634 // CHECK11-NEXT:    br label [[DOTEXIT:%.*]]
9635 // CHECK11:       .exit:
9636 // CHECK11-NEXT:    ret void
9637 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__2
9638 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
9639 // CHECK11-NEXT:  entry:
9640 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9641 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9642 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9643 // CHECK11-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
9644 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9645 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9646 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9647 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9648 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
9649 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9650 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9651 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9652 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9653 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
9654 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
9655 // CHECK11-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
9656 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9657 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9658 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9659 // CHECK11-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
9660 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9661 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
9662 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
9663 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
9664 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9665 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
9666 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9667 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9668 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9669 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
9670 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9671 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
9672 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9673 // CHECK11:       omp.precond.then:
9674 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9675 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9676 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
9677 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9678 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9679 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
9680 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9681 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
9682 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
9683 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9684 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9685 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
9686 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9687 // CHECK11:       cond.true:
9688 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9689 // CHECK11-NEXT:    br label [[COND_END:%.*]]
9690 // CHECK11:       cond.false:
9691 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9692 // CHECK11-NEXT:    br label [[COND_END]]
9693 // CHECK11:       cond.end:
9694 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
9695 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9696 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9697 // CHECK11-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
9698 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9699 // CHECK11:       omp.inner.for.cond:
9700 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9701 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9702 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
9703 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
9704 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9705 // CHECK11:       omp.inner.for.body:
9706 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9707 // CHECK11-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
9708 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9709 // CHECK11-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
9710 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
9711 // CHECK11-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
9712 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
9713 // CHECK11-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
9714 // CHECK11-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
9715 // CHECK11-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to i8*
9716 // CHECK11-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 8
9717 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
9718 // CHECK11-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to i8*
9719 // CHECK11-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
9720 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
9721 // CHECK11-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to i8*
9722 // CHECK11-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
9723 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
9724 // CHECK11-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
9725 // CHECK11-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
9726 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9727 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
9728 // CHECK11-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
9729 // CHECK11-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP30]], i64 4)
9730 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9731 // CHECK11:       omp.inner.for.inc:
9732 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9733 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9734 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
9735 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
9736 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9737 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9738 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
9739 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
9740 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9741 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9742 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
9743 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
9744 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9745 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9746 // CHECK11-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
9747 // CHECK11-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
9748 // CHECK11:       cond.true11:
9749 // CHECK11-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9750 // CHECK11-NEXT:    br label [[COND_END13:%.*]]
9751 // CHECK11:       cond.false12:
9752 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9753 // CHECK11-NEXT:    br label [[COND_END13]]
9754 // CHECK11:       cond.end13:
9755 // CHECK11-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE11]] ], [ [[TMP40]], [[COND_FALSE12]] ]
9756 // CHECK11-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
9757 // CHECK11-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9758 // CHECK11-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV]], align 4
9759 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
9760 // CHECK11:       omp.inner.for.end:
9761 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9762 // CHECK11:       omp.loop.exit:
9763 // CHECK11-NEXT:    [[TMP42:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9764 // CHECK11-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP42]], align 4
9765 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP43]])
9766 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
9767 // CHECK11:       omp.precond.end:
9768 // CHECK11-NEXT:    ret void
9769 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__3
9770 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
9771 // CHECK11-NEXT:  entry:
9772 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9773 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9774 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9775 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9776 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9777 // CHECK11-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
9778 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9779 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9780 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9781 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9782 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
9783 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9784 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9785 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9786 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9787 // CHECK11-NEXT:    [[I5:%.*]] = alloca i32, align 4
9788 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9789 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9790 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9791 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9792 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9793 // CHECK11-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
9794 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9795 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
9796 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
9797 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
9798 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9799 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
9800 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9801 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9802 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9803 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
9804 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9805 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
9806 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9807 // CHECK11:       omp.precond.then:
9808 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9809 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9810 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
9811 // CHECK11-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9812 // CHECK11-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
9813 // CHECK11-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9814 // CHECK11-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
9815 // CHECK11-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
9816 // CHECK11-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
9817 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9818 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9819 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9820 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
9821 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9822 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9823 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
9824 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9825 // CHECK11:       omp.inner.for.cond:
9826 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9827 // CHECK11-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
9828 // CHECK11-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9829 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
9830 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9831 // CHECK11:       omp.inner.for.body:
9832 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9833 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
9834 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9835 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
9836 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I5]], align 4
9837 // CHECK11-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
9838 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i64 0, i64 [[IDXPROM]]
9839 // CHECK11-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
9840 // CHECK11-NEXT:    [[CONV8:%.*]] = sext i16 [[TMP14]] to i32
9841 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[CONV8]], 1
9842 // CHECK11-NEXT:    [[CONV10:%.*]] = trunc i32 [[ADD9]] to i16
9843 // CHECK11-NEXT:    store i16 [[CONV10]], i16* [[ARRAYIDX]], align 2
9844 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9845 // CHECK11:       omp.body.continue:
9846 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9847 // CHECK11:       omp.inner.for.inc:
9848 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9849 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9850 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
9851 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
9852 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
9853 // CHECK11:       omp.inner.for.end:
9854 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9855 // CHECK11:       omp.loop.exit:
9856 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9857 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
9858 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
9859 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
9860 // CHECK11:       omp.precond.end:
9861 // CHECK11-NEXT:    ret void
9862 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
9863 // CHECK11-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
9864 // CHECK11-NEXT:  entry:
9865 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
9866 // CHECK11-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
9867 // CHECK11-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
9868 // CHECK11-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
9869 // CHECK11-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
9870 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
9871 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
9872 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
9873 // CHECK11-NEXT:    br label [[DOTEXECUTE:%.*]]
9874 // CHECK11:       .execute:
9875 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
9876 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
9877 // CHECK11-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
9878 // CHECK11-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
9879 // CHECK11:       .omp.deinit:
9880 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
9881 // CHECK11-NEXT:    br label [[DOTEXIT:%.*]]
9882 // CHECK11:       .exit:
9883 // CHECK11-NEXT:    ret void
9884 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__4
9885 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
9886 // CHECK11-NEXT:  entry:
9887 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9888 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9889 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
9890 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9891 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9892 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9893 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9894 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9895 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9896 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
9897 // CHECK11-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
9898 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9899 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9900 // CHECK11-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
9901 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
9902 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9903 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
9904 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9905 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9906 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
9907 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9908 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
9909 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
9910 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9911 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
9912 // CHECK11-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9913 // CHECK11:       cond.true:
9914 // CHECK11-NEXT:    br label [[COND_END:%.*]]
9915 // CHECK11:       cond.false:
9916 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9917 // CHECK11-NEXT:    br label [[COND_END]]
9918 // CHECK11:       cond.end:
9919 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
9920 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9921 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9922 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
9923 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9924 // CHECK11:       omp.inner.for.cond:
9925 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9926 // CHECK11-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
9927 // CHECK11-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9928 // CHECK11:       omp.inner.for.body:
9929 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9930 // CHECK11-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
9931 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9932 // CHECK11-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
9933 // CHECK11-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
9934 // CHECK11-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to i8*
9935 // CHECK11-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
9936 // CHECK11-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
9937 // CHECK11-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to i8*
9938 // CHECK11-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
9939 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
9940 // CHECK11-NEXT:    [[TMP16:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
9941 // CHECK11-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
9942 // CHECK11-NEXT:    [[TMP17:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
9943 // CHECK11-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP17]], i64 3)
9944 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9945 // CHECK11:       omp.inner.for.inc:
9946 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9947 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9948 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
9949 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
9950 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9951 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9952 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
9953 // CHECK11-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
9954 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9955 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9956 // CHECK11-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
9957 // CHECK11-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
9958 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9959 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP24]], 9
9960 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
9961 // CHECK11:       cond.true5:
9962 // CHECK11-NEXT:    br label [[COND_END7:%.*]]
9963 // CHECK11:       cond.false6:
9964 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9965 // CHECK11-NEXT:    br label [[COND_END7]]
9966 // CHECK11:       cond.end7:
9967 // CHECK11-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP25]], [[COND_FALSE6]] ]
9968 // CHECK11-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
9969 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9970 // CHECK11-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV]], align 4
9971 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
9972 // CHECK11:       omp.inner.for.end:
9973 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9974 // CHECK11:       omp.loop.exit:
9975 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
9976 // CHECK11-NEXT:    ret void
9977 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__5
9978 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
9979 // CHECK11-NEXT:  entry:
9980 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9981 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9982 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9983 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9984 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
9985 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9986 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9987 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9988 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9989 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9990 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9991 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
9992 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9993 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9994 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9995 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9996 // CHECK11-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
9997 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
9998 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9999 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
10000 // CHECK11-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10001 // CHECK11-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
10002 // CHECK11-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10003 // CHECK11-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
10004 // CHECK11-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
10005 // CHECK11-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
10006 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10007 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10008 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10009 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
10010 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10011 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10012 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10013 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10014 // CHECK11:       omp.inner.for.cond:
10015 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10016 // CHECK11-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
10017 // CHECK11-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10018 // CHECK11-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
10019 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10020 // CHECK11:       omp.inner.for.body:
10021 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10022 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
10023 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10024 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
10025 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
10026 // CHECK11-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
10027 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
10028 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
10029 // CHECK11-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
10030 // CHECK11-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
10031 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10032 // CHECK11:       omp.body.continue:
10033 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10034 // CHECK11:       omp.inner.for.inc:
10035 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10036 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10037 // CHECK11-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
10038 // CHECK11-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
10039 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
10040 // CHECK11:       omp.inner.for.end:
10041 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10042 // CHECK11:       omp.loop.exit:
10043 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
10044 // CHECK11-NEXT:    ret void
10045 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
10046 // CHECK11-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
10047 // CHECK11-NEXT:  entry:
10048 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
10049 // CHECK11-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
10050 // CHECK11-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
10051 // CHECK11-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
10052 // CHECK11-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
10053 // CHECK11-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
10054 // CHECK11-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
10055 // CHECK11-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
10056 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
10057 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
10058 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
10059 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
10060 // CHECK11-NEXT:    br label [[DOTEXECUTE:%.*]]
10061 // CHECK11:       .execute:
10062 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
10063 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10064 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i64* [[F_CASTED]] to i32*
10065 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
10066 // CHECK11-NEXT:    [[TMP3:%.*]] = load i64, i64* [[F_CASTED]], align 8
10067 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
10068 // CHECK11-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i64 [[TMP3]]) #[[ATTR3]]
10069 // CHECK11-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
10070 // CHECK11:       .omp.deinit:
10071 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
10072 // CHECK11-NEXT:    br label [[DOTEXIT:%.*]]
10073 // CHECK11:       .exit:
10074 // CHECK11-NEXT:    ret void
10075 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__6
10076 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
10077 // CHECK11-NEXT:  entry:
10078 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10079 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10080 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
10081 // CHECK11-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
10082 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10083 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10084 // CHECK11-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
10085 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10086 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10087 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10088 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10089 // CHECK11-NEXT:    [[K:%.*]] = alloca i32, align 4
10090 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10091 // CHECK11-NEXT:    [[J:%.*]] = alloca i32, align 4
10092 // CHECK11-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
10093 // CHECK11-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
10094 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10095 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10096 // CHECK11-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
10097 // CHECK11-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
10098 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
10099 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
10100 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10101 // CHECK11-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
10102 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10103 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10104 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
10105 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10106 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
10107 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
10108 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10109 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
10110 // CHECK11-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10111 // CHECK11:       cond.true:
10112 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10113 // CHECK11:       cond.false:
10114 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10115 // CHECK11-NEXT:    br label [[COND_END]]
10116 // CHECK11:       cond.end:
10117 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10118 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10119 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10120 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10121 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10122 // CHECK11:       omp.inner.for.cond:
10123 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10124 // CHECK11-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
10125 // CHECK11-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10126 // CHECK11:       omp.inner.for.body:
10127 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10128 // CHECK11-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
10129 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10130 // CHECK11-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
10131 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[CONV]], align 8
10132 // CHECK11-NEXT:    [[CONV3:%.*]] = bitcast i64* [[F_CASTED]] to i32*
10133 // CHECK11-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
10134 // CHECK11-NEXT:    [[TMP12:%.*]] = load i64, i64* [[F_CASTED]], align 8
10135 // CHECK11-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
10136 // CHECK11-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to i8*
10137 // CHECK11-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
10138 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
10139 // CHECK11-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to i8*
10140 // CHECK11-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
10141 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
10142 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
10143 // CHECK11-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
10144 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
10145 // CHECK11-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP12]] to i8*
10146 // CHECK11-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
10147 // CHECK11-NEXT:    [[TMP21:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
10148 // CHECK11-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x [10 x i32]]*, i64)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP21]], i64 4)
10149 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10150 // CHECK11:       omp.inner.for.inc:
10151 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10152 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10153 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
10154 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
10155 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10156 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10157 // CHECK11-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
10158 // CHECK11-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_LB]], align 4
10159 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10160 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10161 // CHECK11-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
10162 // CHECK11-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_COMB_UB]], align 4
10163 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10164 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP28]], 99
10165 // CHECK11-NEXT:    br i1 [[CMP6]], label [[COND_TRUE7:%.*]], label [[COND_FALSE8:%.*]]
10166 // CHECK11:       cond.true7:
10167 // CHECK11-NEXT:    br label [[COND_END9:%.*]]
10168 // CHECK11:       cond.false8:
10169 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10170 // CHECK11-NEXT:    br label [[COND_END9]]
10171 // CHECK11:       cond.end9:
10172 // CHECK11-NEXT:    [[COND10:%.*]] = phi i32 [ 99, [[COND_TRUE7]] ], [ [[TMP29]], [[COND_FALSE8]] ]
10173 // CHECK11-NEXT:    store i32 [[COND10]], i32* [[DOTOMP_COMB_UB]], align 4
10174 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10175 // CHECK11-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV]], align 4
10176 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
10177 // CHECK11:       omp.inner.for.end:
10178 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10179 // CHECK11:       omp.loop.exit:
10180 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
10181 // CHECK11-NEXT:    ret void
10182 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__7
10183 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
10184 // CHECK11-NEXT:  entry:
10185 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10186 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10187 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10188 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10189 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
10190 // CHECK11-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
10191 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10192 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10193 // CHECK11-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
10194 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10195 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10196 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10197 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10198 // CHECK11-NEXT:    [[K:%.*]] = alloca i32, align 4
10199 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10200 // CHECK11-NEXT:    [[J:%.*]] = alloca i32, align 4
10201 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10202 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10203 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10204 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10205 // CHECK11-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
10206 // CHECK11-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
10207 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
10208 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
10209 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10210 // CHECK11-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
10211 // CHECK11-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10212 // CHECK11-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32
10213 // CHECK11-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10214 // CHECK11-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP2]] to i32
10215 // CHECK11-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
10216 // CHECK11-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
10217 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10218 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10219 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10220 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
10221 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10222 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10223 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10224 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10225 // CHECK11:       omp.inner.for.cond:
10226 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10227 // CHECK11-NEXT:    [[CONV4:%.*]] = sext i32 [[TMP6]] to i64
10228 // CHECK11-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10229 // CHECK11-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV4]], [[TMP7]]
10230 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10231 // CHECK11:       omp.inner.for.body:
10232 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10233 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
10234 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
10235 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10236 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
10237 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10238 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10239 // CHECK11-NEXT:    [[DIV5:%.*]] = sdiv i32 [[TMP10]], 10
10240 // CHECK11-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 10
10241 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL6]]
10242 // CHECK11-NEXT:    [[MUL7:%.*]] = mul nsw i32 [[SUB]], 1
10243 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL7]]
10244 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[J]], align 4
10245 // CHECK11-NEXT:    store i32 10, i32* [[K]], align 4
10246 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
10247 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
10248 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8
10249 // CHECK11-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
10250 // CHECK11-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP11]], [[MUL9]]
10251 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
10252 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD10]], [[TMP14]]
10253 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
10254 // CHECK11-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
10255 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
10256 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
10257 // CHECK11-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP16]] to i64
10258 // CHECK11-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM12]]
10259 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[ARRAYIDX13]], align 4
10260 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10261 // CHECK11:       omp.body.continue:
10262 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10263 // CHECK11:       omp.inner.for.inc:
10264 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10265 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10266 // CHECK11-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
10267 // CHECK11-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
10268 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
10269 // CHECK11:       omp.inner.for.end:
10270 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10271 // CHECK11:       omp.loop.exit:
10272 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
10273 // CHECK11-NEXT:    ret void
10274 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
10275 // CHECK11-SAME: (i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
10276 // CHECK11-NEXT:  entry:
10277 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10278 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
10279 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10280 // CHECK11-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
10281 // CHECK11-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
10282 // CHECK11-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
10283 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10284 // CHECK11-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
10285 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10286 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
10287 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
10288 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
10289 // CHECK11-NEXT:    br label [[DOTEXECUTE:%.*]]
10290 // CHECK11:       .execute:
10291 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
10292 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10293 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10294 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
10295 // CHECK11-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
10296 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
10297 // CHECK11-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
10298 // CHECK11-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
10299 // CHECK11:       .omp.deinit:
10300 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
10301 // CHECK11-NEXT:    br label [[DOTEXIT:%.*]]
10302 // CHECK11:       .exit:
10303 // CHECK11-NEXT:    ret void
10304 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__8
10305 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
10306 // CHECK11-NEXT:  entry:
10307 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10308 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10309 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10310 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
10311 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10312 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10313 // CHECK11-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
10314 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10315 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10316 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
10317 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10318 // CHECK11-NEXT:    [[J:%.*]] = alloca i32, align 4
10319 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10320 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10321 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10322 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10323 // CHECK11-NEXT:    [[I8:%.*]] = alloca i32, align 4
10324 // CHECK11-NEXT:    [[J9:%.*]] = alloca i32, align 4
10325 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10326 // CHECK11-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
10327 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10328 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10329 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10330 // CHECK11-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
10331 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10332 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
10333 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
10334 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
10335 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10336 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10337 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10338 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
10339 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10340 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10341 // CHECK11-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
10342 // CHECK11-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
10343 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
10344 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
10345 // CHECK11-NEXT:    store i32 [[SUB6]], i32* [[DOTCAPTURE_EXPR_3]], align 4
10346 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10347 // CHECK11-NEXT:    store i32 0, i32* [[J]], align 4
10348 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10349 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
10350 // CHECK11-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
10351 // CHECK11:       land.lhs.true:
10352 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10353 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
10354 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
10355 // CHECK11:       omp.precond.then:
10356 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10357 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10358 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
10359 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10360 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10361 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
10362 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10363 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
10364 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
10365 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10366 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10367 // CHECK11-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10368 // CHECK11-NEXT:    br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10369 // CHECK11:       cond.true:
10370 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10371 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10372 // CHECK11:       cond.false:
10373 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10374 // CHECK11-NEXT:    br label [[COND_END]]
10375 // CHECK11:       cond.end:
10376 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10377 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10378 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10379 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10380 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10381 // CHECK11:       omp.inner.for.cond:
10382 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10383 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10384 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
10385 // CHECK11-NEXT:    [[CMP11:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
10386 // CHECK11-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10387 // CHECK11:       omp.inner.for.body:
10388 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10389 // CHECK11-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
10390 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10391 // CHECK11-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
10392 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV]], align 8
10393 // CHECK11-NEXT:    [[CONV12:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10394 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[CONV12]], align 4
10395 // CHECK11-NEXT:    [[TMP22:%.*]] = load i64, i64* [[N_CASTED]], align 8
10396 // CHECK11-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
10397 // CHECK11-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to i8*
10398 // CHECK11-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
10399 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
10400 // CHECK11-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to i8*
10401 // CHECK11-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
10402 // CHECK11-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
10403 // CHECK11-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP22]] to i8*
10404 // CHECK11-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
10405 // CHECK11-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
10406 // CHECK11-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
10407 // CHECK11-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
10408 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10409 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
10410 // CHECK11-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
10411 // CHECK11-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i64 4)
10412 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10413 // CHECK11:       omp.inner.for.inc:
10414 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10415 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10416 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
10417 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
10418 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10419 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10420 // CHECK11-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
10421 // CHECK11-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_COMB_LB]], align 4
10422 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10423 // CHECK11-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10424 // CHECK11-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
10425 // CHECK11-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_COMB_UB]], align 4
10426 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10427 // CHECK11-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10428 // CHECK11-NEXT:    [[CMP16:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
10429 // CHECK11-NEXT:    br i1 [[CMP16]], label [[COND_TRUE17:%.*]], label [[COND_FALSE18:%.*]]
10430 // CHECK11:       cond.true17:
10431 // CHECK11-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10432 // CHECK11-NEXT:    br label [[COND_END19:%.*]]
10433 // CHECK11:       cond.false18:
10434 // CHECK11-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10435 // CHECK11-NEXT:    br label [[COND_END19]]
10436 // CHECK11:       cond.end19:
10437 // CHECK11-NEXT:    [[COND20:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE17]] ], [ [[TMP43]], [[COND_FALSE18]] ]
10438 // CHECK11-NEXT:    store i32 [[COND20]], i32* [[DOTOMP_COMB_UB]], align 4
10439 // CHECK11-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10440 // CHECK11-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
10441 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
10442 // CHECK11:       omp.inner.for.end:
10443 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10444 // CHECK11:       omp.loop.exit:
10445 // CHECK11-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10446 // CHECK11-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
10447 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
10448 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10449 // CHECK11:       omp.precond.end:
10450 // CHECK11-NEXT:    ret void
10451 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__9
10452 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
10453 // CHECK11-NEXT:  entry:
10454 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10455 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10456 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10457 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10458 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10459 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
10460 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10461 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10462 // CHECK11-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
10463 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10464 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10465 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
10466 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10467 // CHECK11-NEXT:    [[J:%.*]] = alloca i32, align 4
10468 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10469 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10470 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10471 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10472 // CHECK11-NEXT:    [[I10:%.*]] = alloca i32, align 4
10473 // CHECK11-NEXT:    [[J11:%.*]] = alloca i32, align 4
10474 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10475 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10476 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10477 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10478 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10479 // CHECK11-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
10480 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10481 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
10482 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
10483 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
10484 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10485 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10486 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10487 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
10488 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10489 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10490 // CHECK11-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
10491 // CHECK11-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
10492 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
10493 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
10494 // CHECK11-NEXT:    store i32 [[SUB6]], i32* [[DOTCAPTURE_EXPR_3]], align 4
10495 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10496 // CHECK11-NEXT:    store i32 0, i32* [[J]], align 4
10497 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10498 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
10499 // CHECK11-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
10500 // CHECK11:       land.lhs.true:
10501 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10502 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
10503 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
10504 // CHECK11:       omp.precond.then:
10505 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10506 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10507 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10508 // CHECK11-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10509 // CHECK11-NEXT:    [[CONV8:%.*]] = trunc i64 [[TMP8]] to i32
10510 // CHECK11-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10511 // CHECK11-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP9]] to i32
10512 // CHECK11-NEXT:    store i32 [[CONV8]], i32* [[DOTOMP_LB]], align 4
10513 // CHECK11-NEXT:    store i32 [[CONV9]], i32* [[DOTOMP_UB]], align 4
10514 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10515 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10516 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10517 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10518 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10519 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10520 // CHECK11-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
10521 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10522 // CHECK11:       omp.inner.for.cond:
10523 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10524 // CHECK11-NEXT:    [[CONV12:%.*]] = sext i32 [[TMP13]] to i64
10525 // CHECK11-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10526 // CHECK11-NEXT:    [[CMP13:%.*]] = icmp ule i64 [[CONV12]], [[TMP14]]
10527 // CHECK11-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10528 // CHECK11:       omp.inner.for.body:
10529 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10530 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10531 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP16]], 0
10532 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
10533 // CHECK11-NEXT:    [[MUL16:%.*]] = mul nsw i32 1, [[DIV15]]
10534 // CHECK11-NEXT:    [[DIV17:%.*]] = sdiv i32 [[TMP15]], [[MUL16]]
10535 // CHECK11-NEXT:    [[MUL18:%.*]] = mul nsw i32 [[DIV17]], 1
10536 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL18]]
10537 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I10]], align 4
10538 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10539 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10540 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10541 // CHECK11-NEXT:    [[SUB19:%.*]] = sub nsw i32 [[TMP19]], 0
10542 // CHECK11-NEXT:    [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1
10543 // CHECK11-NEXT:    [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]]
10544 // CHECK11-NEXT:    [[DIV22:%.*]] = sdiv i32 [[TMP18]], [[MUL21]]
10545 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10546 // CHECK11-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP20]], 0
10547 // CHECK11-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
10548 // CHECK11-NEXT:    [[MUL25:%.*]] = mul nsw i32 1, [[DIV24]]
10549 // CHECK11-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[DIV22]], [[MUL25]]
10550 // CHECK11-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP17]], [[MUL26]]
10551 // CHECK11-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[SUB27]], 1
10552 // CHECK11-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
10553 // CHECK11-NEXT:    store i32 [[ADD29]], i32* [[J11]], align 4
10554 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I10]], align 4
10555 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J11]], align 4
10556 // CHECK11-NEXT:    [[ADD30:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
10557 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I10]], align 4
10558 // CHECK11-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
10559 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
10560 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J11]], align 4
10561 // CHECK11-NEXT:    [[IDXPROM31:%.*]] = sext i32 [[TMP24]] to i64
10562 // CHECK11-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM31]]
10563 // CHECK11-NEXT:    store i32 [[ADD30]], i32* [[ARRAYIDX32]], align 4
10564 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10565 // CHECK11:       omp.body.continue:
10566 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10567 // CHECK11:       omp.inner.for.inc:
10568 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10569 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10570 // CHECK11-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
10571 // CHECK11-NEXT:    store i32 [[ADD33]], i32* [[DOTOMP_IV]], align 4
10572 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
10573 // CHECK11:       omp.inner.for.end:
10574 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10575 // CHECK11:       omp.loop.exit:
10576 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10577 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
10578 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
10579 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10580 // CHECK11:       omp.precond.end:
10581 // CHECK11-NEXT:    ret void
10582 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
10583 // CHECK11-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
10584 // CHECK11-NEXT:  entry:
10585 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10586 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
10587 // CHECK11-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
10588 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10589 // CHECK11-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
10590 // CHECK11-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
10591 // CHECK11-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
10592 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10593 // CHECK11-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
10594 // CHECK11-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
10595 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10596 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
10597 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
10598 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
10599 // CHECK11-NEXT:    br label [[DOTEXECUTE:%.*]]
10600 // CHECK11:       .execute:
10601 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
10602 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10603 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10604 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
10605 // CHECK11-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
10606 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 8
10607 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
10608 // CHECK11-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
10609 // CHECK11-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
10610 // CHECK11:       .omp.deinit:
10611 // CHECK11-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
10612 // CHECK11-NEXT:    br label [[DOTEXIT:%.*]]
10613 // CHECK11:       .exit:
10614 // CHECK11-NEXT:    ret void
10615 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__10
10616 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
10617 // CHECK11-NEXT:  entry:
10618 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10619 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10620 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10621 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
10622 // CHECK11-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
10623 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10624 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10625 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10626 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10627 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10628 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10629 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10630 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10631 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10632 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10633 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10634 // CHECK11-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
10635 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10636 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10637 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10638 // CHECK11-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
10639 // CHECK11-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
10640 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10641 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
10642 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
10643 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
10644 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10645 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
10646 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10647 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10648 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10649 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10650 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10651 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
10652 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10653 // CHECK11:       omp.precond.then:
10654 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10655 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10656 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
10657 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10658 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10659 // CHECK11-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
10660 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10661 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
10662 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
10663 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10664 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10665 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
10666 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10667 // CHECK11:       cond.true:
10668 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10669 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10670 // CHECK11:       cond.false:
10671 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10672 // CHECK11-NEXT:    br label [[COND_END]]
10673 // CHECK11:       cond.end:
10674 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
10675 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10676 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10677 // CHECK11-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
10678 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10679 // CHECK11:       omp.inner.for.cond:
10680 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10681 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10682 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
10683 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
10684 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10685 // CHECK11:       omp.inner.for.body:
10686 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10687 // CHECK11-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
10688 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10689 // CHECK11-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
10690 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
10691 // CHECK11-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10692 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
10693 // CHECK11-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
10694 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[V_ADDR]], align 8
10695 // CHECK11-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
10696 // CHECK11-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to i8*
10697 // CHECK11-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
10698 // CHECK11-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
10699 // CHECK11-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to i8*
10700 // CHECK11-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
10701 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
10702 // CHECK11-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to i8*
10703 // CHECK11-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
10704 // CHECK11-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
10705 // CHECK11-NEXT:    [[TMP28:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
10706 // CHECK11-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
10707 // CHECK11-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
10708 // CHECK11-NEXT:    [[TMP30:%.*]] = bitcast i32* [[TMP20]] to i8*
10709 // CHECK11-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
10710 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10711 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
10712 // CHECK11-NEXT:    [[TMP33:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
10713 // CHECK11-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP33]], i64 5)
10714 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10715 // CHECK11:       omp.inner.for.inc:
10716 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10717 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10718 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
10719 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
10720 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10721 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10722 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
10723 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
10724 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10725 // CHECK11-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10726 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
10727 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
10728 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10729 // CHECK11-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10730 // CHECK11-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
10731 // CHECK11-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
10732 // CHECK11:       cond.true11:
10733 // CHECK11-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10734 // CHECK11-NEXT:    br label [[COND_END13:%.*]]
10735 // CHECK11:       cond.false12:
10736 // CHECK11-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10737 // CHECK11-NEXT:    br label [[COND_END13]]
10738 // CHECK11:       cond.end13:
10739 // CHECK11-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE11]] ], [ [[TMP43]], [[COND_FALSE12]] ]
10740 // CHECK11-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
10741 // CHECK11-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10742 // CHECK11-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
10743 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
10744 // CHECK11:       omp.inner.for.end:
10745 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10746 // CHECK11:       omp.loop.exit:
10747 // CHECK11-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10748 // CHECK11-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
10749 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
10750 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10751 // CHECK11:       omp.precond.end:
10752 // CHECK11-NEXT:    ret void
10753 // CHECK11-LABEL: define {{[^@]+}}@__omp_outlined__11
10754 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
10755 // CHECK11-NEXT:  entry:
10756 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10757 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10758 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10759 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10760 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10761 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
10762 // CHECK11-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
10763 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10764 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10765 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10766 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10767 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10768 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10769 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10770 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10771 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10772 // CHECK11-NEXT:    [[I5:%.*]] = alloca i32, align 4
10773 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10774 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10775 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10776 // CHECK11-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10777 // CHECK11-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10778 // CHECK11-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
10779 // CHECK11-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
10780 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10781 // CHECK11-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
10782 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
10783 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
10784 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10785 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
10786 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10787 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10788 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10789 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10790 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10791 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
10792 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10793 // CHECK11:       omp.precond.then:
10794 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10795 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10796 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
10797 // CHECK11-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10798 // CHECK11-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
10799 // CHECK11-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10800 // CHECK11-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
10801 // CHECK11-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
10802 // CHECK11-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
10803 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10804 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10805 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10806 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
10807 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10808 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10809 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
10810 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10811 // CHECK11:       omp.inner.for.cond:
10812 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10813 // CHECK11-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
10814 // CHECK11-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10815 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
10816 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10817 // CHECK11:       omp.inner.for.body:
10818 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10819 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
10820 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10821 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
10822 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 8
10823 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I5]], align 4
10824 // CHECK11-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
10825 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 [[IDXPROM]]
10826 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
10827 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I5]], align 4
10828 // CHECK11-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP16]] to i64
10829 // CHECK11-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM8]]
10830 // CHECK11-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX9]], align 4
10831 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10832 // CHECK11:       omp.body.continue:
10833 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10834 // CHECK11:       omp.inner.for.inc:
10835 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10836 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10837 // CHECK11-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
10838 // CHECK11-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
10839 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
10840 // CHECK11:       omp.inner.for.end:
10841 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10842 // CHECK11:       omp.loop.exit:
10843 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10844 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
10845 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
10846 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10847 // CHECK11:       omp.precond.end:
10848 // CHECK11-NEXT:    ret void
10849 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
10850 // CHECK12-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
10851 // CHECK12-NEXT:  entry:
10852 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10853 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
10854 // CHECK12-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
10855 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10856 // CHECK12-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
10857 // CHECK12-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
10858 // CHECK12-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
10859 // CHECK12-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
10860 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10861 // CHECK12-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
10862 // CHECK12-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
10863 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10864 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
10865 // CHECK12-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
10866 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
10867 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
10868 // CHECK12-NEXT:    br label [[DOTEXECUTE:%.*]]
10869 // CHECK12:       .execute:
10870 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
10871 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
10872 // CHECK12-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10873 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[CONV2]], align 4
10874 // CHECK12-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
10875 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
10876 // CHECK12-NEXT:    [[CONV3:%.*]] = bitcast i64* [[L_CASTED]] to i32*
10877 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
10878 // CHECK12-NEXT:    [[TMP5:%.*]] = load i64, i64* [[L_CASTED]], align 8
10879 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
10880 // CHECK12-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i64 [[TMP5]]) #[[ATTR3:[0-9]+]]
10881 // CHECK12-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
10882 // CHECK12:       .omp.deinit:
10883 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
10884 // CHECK12-NEXT:    br label [[DOTEXIT:%.*]]
10885 // CHECK12:       .exit:
10886 // CHECK12-NEXT:    ret void
10887 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__
10888 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
10889 // CHECK12-NEXT:  entry:
10890 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10891 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10892 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10893 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
10894 // CHECK12-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
10895 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10896 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10897 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10898 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
10899 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
10900 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10901 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10902 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10903 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10904 // CHECK12-NEXT:    [[I5:%.*]] = alloca i32, align 4
10905 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10906 // CHECK12-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
10907 // CHECK12-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
10908 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10909 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10910 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10911 // CHECK12-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
10912 // CHECK12-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
10913 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10914 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
10915 // CHECK12-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
10916 // CHECK12-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i64 4, i16 1)
10917 // CHECK12-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
10918 // CHECK12-NEXT:    [[L2:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
10919 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
10920 // CHECK12-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
10921 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10922 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
10923 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10924 // CHECK12-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
10925 // CHECK12-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
10926 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
10927 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10928 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
10929 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10930 // CHECK12:       omp.precond.then:
10931 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10932 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10933 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_COMB_UB]], align 4
10934 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10935 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10936 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10937 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
10938 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
10939 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10940 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10941 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
10942 // CHECK12-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10943 // CHECK12:       cond.true:
10944 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10945 // CHECK12-NEXT:    br label [[COND_END:%.*]]
10946 // CHECK12:       cond.false:
10947 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10948 // CHECK12-NEXT:    br label [[COND_END]]
10949 // CHECK12:       cond.end:
10950 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
10951 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10952 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10953 // CHECK12-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
10954 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10955 // CHECK12:       omp.inner.for.cond:
10956 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10957 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
10958 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
10959 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
10960 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10961 // CHECK12:       omp.inner.for.body:
10962 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10963 // CHECK12-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
10964 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10965 // CHECK12-NEXT:    [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
10966 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8
10967 // CHECK12-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10968 // CHECK12-NEXT:    store i32 [[TMP20]], i32* [[CONV8]], align 4
10969 // CHECK12-NEXT:    [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8
10970 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8
10971 // CHECK12-NEXT:    [[CONV9:%.*]] = bitcast i64* [[L_CASTED]] to i32*
10972 // CHECK12-NEXT:    store i32 [[TMP22]], i32* [[CONV9]], align 4
10973 // CHECK12-NEXT:    [[TMP23:%.*]] = load i64, i64* [[L_CASTED]], align 8
10974 // CHECK12-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
10975 // CHECK12-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP17]] to i8*
10976 // CHECK12-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
10977 // CHECK12-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
10978 // CHECK12-NEXT:    [[TMP27:%.*]] = inttoptr i64 [[TMP19]] to i8*
10979 // CHECK12-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
10980 // CHECK12-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
10981 // CHECK12-NEXT:    [[TMP29:%.*]] = inttoptr i64 [[TMP21]] to i8*
10982 // CHECK12-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
10983 // CHECK12-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
10984 // CHECK12-NEXT:    [[TMP31:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
10985 // CHECK12-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 8
10986 // CHECK12-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
10987 // CHECK12-NEXT:    [[TMP33:%.*]] = inttoptr i64 [[TMP23]] to i8*
10988 // CHECK12-NEXT:    store i8* [[TMP33]], i8** [[TMP32]], align 8
10989 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10990 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
10991 // CHECK12-NEXT:    [[TMP36:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
10992 // CHECK12-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP35]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i64)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP36]], i64 5)
10993 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10994 // CHECK12:       omp.inner.for.inc:
10995 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10996 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10997 // CHECK12-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
10998 // CHECK12-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
10999 // CHECK12-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11000 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11001 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
11002 // CHECK12-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
11003 // CHECK12-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11004 // CHECK12-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11005 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP41]], [[TMP42]]
11006 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
11007 // CHECK12-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11008 // CHECK12-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11009 // CHECK12-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP43]], [[TMP44]]
11010 // CHECK12-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
11011 // CHECK12:       cond.true14:
11012 // CHECK12-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11013 // CHECK12-NEXT:    br label [[COND_END16:%.*]]
11014 // CHECK12:       cond.false15:
11015 // CHECK12-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11016 // CHECK12-NEXT:    br label [[COND_END16]]
11017 // CHECK12:       cond.end16:
11018 // CHECK12-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP45]], [[COND_TRUE14]] ], [ [[TMP46]], [[COND_FALSE15]] ]
11019 // CHECK12-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
11020 // CHECK12-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11021 // CHECK12-NEXT:    store i32 [[TMP47]], i32* [[DOTOMP_IV]], align 4
11022 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11023 // CHECK12:       omp.inner.for.end:
11024 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11025 // CHECK12:       omp.loop.exit:
11026 // CHECK12-NEXT:    [[TMP48:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11027 // CHECK12-NEXT:    [[TMP49:%.*]] = load i32, i32* [[TMP48]], align 4
11028 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP49]])
11029 // CHECK12-NEXT:    [[TMP50:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11030 // CHECK12-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
11031 // CHECK12-NEXT:    br i1 [[TMP51]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
11032 // CHECK12:       .omp.lastprivate.then:
11033 // CHECK12-NEXT:    [[TMP52:%.*]] = load i32, i32* [[CONV1]], align 8
11034 // CHECK12-NEXT:    store i32 [[TMP52]], i32* [[CONV1]], align 8
11035 // CHECK12-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
11036 // CHECK12:       .omp.lastprivate.done:
11037 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
11038 // CHECK12:       omp.precond.end:
11039 // CHECK12-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
11040 // CHECK12-NEXT:    ret void
11041 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__1
11042 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
11043 // CHECK12-NEXT:  entry:
11044 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11045 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11046 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11047 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11048 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11049 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
11050 // CHECK12-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
11051 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11052 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11053 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11054 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11055 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11056 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11057 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11058 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11059 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11060 // CHECK12-NEXT:    [[I6:%.*]] = alloca i32, align 4
11061 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11062 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11063 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11064 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11065 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11066 // CHECK12-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
11067 // CHECK12-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
11068 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11069 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
11070 // CHECK12-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
11071 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
11072 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
11073 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11074 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
11075 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11076 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11077 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11078 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
11079 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11080 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
11081 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11082 // CHECK12:       omp.precond.then:
11083 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11084 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11085 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
11086 // CHECK12-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11087 // CHECK12-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
11088 // CHECK12-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11089 // CHECK12-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
11090 // CHECK12-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
11091 // CHECK12-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
11092 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11093 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11094 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11095 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
11096 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
11097 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11098 // CHECK12:       omp.dispatch.cond:
11099 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11100 // CHECK12-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP9]] to i64
11101 // CHECK12-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11102 // CHECK12-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP10]]
11103 // CHECK12-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11104 // CHECK12:       cond.true:
11105 // CHECK12-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11106 // CHECK12-NEXT:    br label [[COND_END:%.*]]
11107 // CHECK12:       cond.false:
11108 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11109 // CHECK12-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP12]] to i64
11110 // CHECK12-NEXT:    br label [[COND_END]]
11111 // CHECK12:       cond.end:
11112 // CHECK12-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP11]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
11113 // CHECK12-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
11114 // CHECK12-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
11115 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11116 // CHECK12-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
11117 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11118 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11119 // CHECK12-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
11120 // CHECK12-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11121 // CHECK12:       omp.dispatch.body:
11122 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11123 // CHECK12:       omp.inner.for.cond:
11124 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11125 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11126 // CHECK12-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
11127 // CHECK12-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11128 // CHECK12:       omp.inner.for.body:
11129 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11130 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
11131 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11132 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4
11133 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I6]], align 4
11134 // CHECK12-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
11135 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
11136 // CHECK12-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
11137 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I6]], align 4
11138 // CHECK12-NEXT:    store i32 [[TMP20]], i32* [[CONV1]], align 8
11139 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11140 // CHECK12:       omp.body.continue:
11141 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11142 // CHECK12:       omp.inner.for.inc:
11143 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11144 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP21]], 1
11145 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
11146 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11147 // CHECK12:       omp.inner.for.end:
11148 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11149 // CHECK12:       omp.dispatch.inc:
11150 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11151 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11152 // CHECK12-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
11153 // CHECK12-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_LB]], align 4
11154 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11155 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11156 // CHECK12-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
11157 // CHECK12-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_UB]], align 4
11158 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
11159 // CHECK12:       omp.dispatch.end:
11160 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11161 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
11162 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
11163 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11164 // CHECK12-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
11165 // CHECK12-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
11166 // CHECK12:       .omp.lastprivate.then:
11167 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[CONV1]], align 8
11168 // CHECK12-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
11169 // CHECK12-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
11170 // CHECK12:       .omp.lastprivate.done:
11171 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
11172 // CHECK12:       omp.precond.end:
11173 // CHECK12-NEXT:    ret void
11174 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
11175 // CHECK12-SAME: (i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
11176 // CHECK12-NEXT:  entry:
11177 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11178 // CHECK12-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
11179 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
11180 // CHECK12-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
11181 // CHECK12-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
11182 // CHECK12-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
11183 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11184 // CHECK12-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
11185 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11186 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
11187 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11188 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
11189 // CHECK12-NEXT:    br label [[DOTEXECUTE:%.*]]
11190 // CHECK12:       .execute:
11191 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
11192 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
11193 // CHECK12-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11194 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
11195 // CHECK12-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
11196 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
11197 // CHECK12-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
11198 // CHECK12-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
11199 // CHECK12:       .omp.deinit:
11200 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
11201 // CHECK12-NEXT:    br label [[DOTEXIT:%.*]]
11202 // CHECK12:       .exit:
11203 // CHECK12-NEXT:    ret void
11204 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__2
11205 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
11206 // CHECK12-NEXT:  entry:
11207 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11208 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11209 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11210 // CHECK12-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
11211 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11212 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11213 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11214 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11215 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11216 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11217 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11218 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11219 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11220 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
11221 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
11222 // CHECK12-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
11223 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11224 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11225 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11226 // CHECK12-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
11227 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11228 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
11229 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
11230 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
11231 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11232 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
11233 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11234 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11235 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11236 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
11237 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11238 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
11239 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11240 // CHECK12:       omp.precond.then:
11241 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11242 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11243 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
11244 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11245 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11246 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11247 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11248 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
11249 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
11250 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11251 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11252 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
11253 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11254 // CHECK12:       cond.true:
11255 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11256 // CHECK12-NEXT:    br label [[COND_END:%.*]]
11257 // CHECK12:       cond.false:
11258 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11259 // CHECK12-NEXT:    br label [[COND_END]]
11260 // CHECK12:       cond.end:
11261 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
11262 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11263 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11264 // CHECK12-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
11265 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11266 // CHECK12:       omp.inner.for.cond:
11267 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11268 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11269 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
11270 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
11271 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11272 // CHECK12:       omp.inner.for.body:
11273 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11274 // CHECK12-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
11275 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11276 // CHECK12-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
11277 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
11278 // CHECK12-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11279 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
11280 // CHECK12-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
11281 // CHECK12-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
11282 // CHECK12-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to i8*
11283 // CHECK12-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 8
11284 // CHECK12-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
11285 // CHECK12-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to i8*
11286 // CHECK12-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
11287 // CHECK12-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
11288 // CHECK12-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to i8*
11289 // CHECK12-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
11290 // CHECK12-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
11291 // CHECK12-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
11292 // CHECK12-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
11293 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11294 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
11295 // CHECK12-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
11296 // CHECK12-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP30]], i64 4)
11297 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11298 // CHECK12:       omp.inner.for.inc:
11299 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11300 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11301 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
11302 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
11303 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11304 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11305 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
11306 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
11307 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11308 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11309 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
11310 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
11311 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11312 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11313 // CHECK12-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
11314 // CHECK12-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
11315 // CHECK12:       cond.true11:
11316 // CHECK12-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11317 // CHECK12-NEXT:    br label [[COND_END13:%.*]]
11318 // CHECK12:       cond.false12:
11319 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11320 // CHECK12-NEXT:    br label [[COND_END13]]
11321 // CHECK12:       cond.end13:
11322 // CHECK12-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE11]] ], [ [[TMP40]], [[COND_FALSE12]] ]
11323 // CHECK12-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
11324 // CHECK12-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11325 // CHECK12-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV]], align 4
11326 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11327 // CHECK12:       omp.inner.for.end:
11328 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11329 // CHECK12:       omp.loop.exit:
11330 // CHECK12-NEXT:    [[TMP42:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11331 // CHECK12-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP42]], align 4
11332 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP43]])
11333 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
11334 // CHECK12:       omp.precond.end:
11335 // CHECK12-NEXT:    ret void
11336 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__3
11337 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
11338 // CHECK12-NEXT:  entry:
11339 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11340 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11341 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11342 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11343 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11344 // CHECK12-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
11345 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11346 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11347 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11348 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11349 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11350 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11351 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11352 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11353 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11354 // CHECK12-NEXT:    [[I5:%.*]] = alloca i32, align 4
11355 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11356 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11357 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11358 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11359 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11360 // CHECK12-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
11361 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11362 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
11363 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
11364 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
11365 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11366 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
11367 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11368 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11369 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11370 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
11371 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11372 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
11373 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11374 // CHECK12:       omp.precond.then:
11375 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11376 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11377 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
11378 // CHECK12-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11379 // CHECK12-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
11380 // CHECK12-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11381 // CHECK12-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
11382 // CHECK12-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
11383 // CHECK12-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
11384 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11385 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11386 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11387 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
11388 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11389 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11390 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
11391 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11392 // CHECK12:       omp.inner.for.cond:
11393 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11394 // CHECK12-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
11395 // CHECK12-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11396 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
11397 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11398 // CHECK12:       omp.inner.for.body:
11399 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11400 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
11401 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11402 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
11403 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I5]], align 4
11404 // CHECK12-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
11405 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i64 0, i64 [[IDXPROM]]
11406 // CHECK12-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
11407 // CHECK12-NEXT:    [[CONV8:%.*]] = sext i16 [[TMP14]] to i32
11408 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[CONV8]], 1
11409 // CHECK12-NEXT:    [[CONV10:%.*]] = trunc i32 [[ADD9]] to i16
11410 // CHECK12-NEXT:    store i16 [[CONV10]], i16* [[ARRAYIDX]], align 2
11411 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11412 // CHECK12:       omp.body.continue:
11413 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11414 // CHECK12:       omp.inner.for.inc:
11415 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11416 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11417 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
11418 // CHECK12-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
11419 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11420 // CHECK12:       omp.inner.for.end:
11421 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11422 // CHECK12:       omp.loop.exit:
11423 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11424 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
11425 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
11426 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
11427 // CHECK12:       omp.precond.end:
11428 // CHECK12-NEXT:    ret void
11429 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
11430 // CHECK12-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
11431 // CHECK12-NEXT:  entry:
11432 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
11433 // CHECK12-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
11434 // CHECK12-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
11435 // CHECK12-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
11436 // CHECK12-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
11437 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
11438 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11439 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
11440 // CHECK12-NEXT:    br label [[DOTEXECUTE:%.*]]
11441 // CHECK12:       .execute:
11442 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
11443 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
11444 // CHECK12-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
11445 // CHECK12-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
11446 // CHECK12:       .omp.deinit:
11447 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
11448 // CHECK12-NEXT:    br label [[DOTEXIT:%.*]]
11449 // CHECK12:       .exit:
11450 // CHECK12-NEXT:    ret void
11451 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__4
11452 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
11453 // CHECK12-NEXT:  entry:
11454 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11455 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11456 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
11457 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11458 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11459 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11460 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11461 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11462 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11463 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11464 // CHECK12-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
11465 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11466 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11467 // CHECK12-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
11468 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
11469 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11470 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
11471 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11472 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11473 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11474 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11475 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
11476 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
11477 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11478 // CHECK12-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
11479 // CHECK12-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11480 // CHECK12:       cond.true:
11481 // CHECK12-NEXT:    br label [[COND_END:%.*]]
11482 // CHECK12:       cond.false:
11483 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11484 // CHECK12-NEXT:    br label [[COND_END]]
11485 // CHECK12:       cond.end:
11486 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
11487 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11488 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11489 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
11490 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11491 // CHECK12:       omp.inner.for.cond:
11492 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11493 // CHECK12-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
11494 // CHECK12-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11495 // CHECK12:       omp.inner.for.body:
11496 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11497 // CHECK12-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
11498 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11499 // CHECK12-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
11500 // CHECK12-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
11501 // CHECK12-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to i8*
11502 // CHECK12-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
11503 // CHECK12-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
11504 // CHECK12-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to i8*
11505 // CHECK12-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
11506 // CHECK12-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
11507 // CHECK12-NEXT:    [[TMP16:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
11508 // CHECK12-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
11509 // CHECK12-NEXT:    [[TMP17:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
11510 // CHECK12-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP17]], i64 3)
11511 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11512 // CHECK12:       omp.inner.for.inc:
11513 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11514 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11515 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11516 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
11517 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11518 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11519 // CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
11520 // CHECK12-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
11521 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11522 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11523 // CHECK12-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
11524 // CHECK12-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
11525 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11526 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP24]], 9
11527 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
11528 // CHECK12:       cond.true5:
11529 // CHECK12-NEXT:    br label [[COND_END7:%.*]]
11530 // CHECK12:       cond.false6:
11531 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11532 // CHECK12-NEXT:    br label [[COND_END7]]
11533 // CHECK12:       cond.end7:
11534 // CHECK12-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP25]], [[COND_FALSE6]] ]
11535 // CHECK12-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
11536 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11537 // CHECK12-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV]], align 4
11538 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11539 // CHECK12:       omp.inner.for.end:
11540 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11541 // CHECK12:       omp.loop.exit:
11542 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
11543 // CHECK12-NEXT:    ret void
11544 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__5
11545 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
11546 // CHECK12-NEXT:  entry:
11547 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11548 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11549 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11550 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11551 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
11552 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11553 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11554 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11555 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11556 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11557 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11558 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11559 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11560 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11561 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11562 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11563 // CHECK12-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
11564 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
11565 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11566 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11567 // CHECK12-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11568 // CHECK12-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
11569 // CHECK12-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11570 // CHECK12-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
11571 // CHECK12-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
11572 // CHECK12-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
11573 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11574 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11575 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11576 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
11577 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11578 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11579 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
11580 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11581 // CHECK12:       omp.inner.for.cond:
11582 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11583 // CHECK12-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
11584 // CHECK12-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11585 // CHECK12-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
11586 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11587 // CHECK12:       omp.inner.for.body:
11588 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11589 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
11590 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11591 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
11592 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
11593 // CHECK12-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
11594 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
11595 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
11596 // CHECK12-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
11597 // CHECK12-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
11598 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11599 // CHECK12:       omp.body.continue:
11600 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11601 // CHECK12:       omp.inner.for.inc:
11602 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11603 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11604 // CHECK12-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
11605 // CHECK12-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
11606 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11607 // CHECK12:       omp.inner.for.end:
11608 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11609 // CHECK12:       omp.loop.exit:
11610 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
11611 // CHECK12-NEXT:    ret void
11612 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
11613 // CHECK12-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
11614 // CHECK12-NEXT:  entry:
11615 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
11616 // CHECK12-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
11617 // CHECK12-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
11618 // CHECK12-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
11619 // CHECK12-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
11620 // CHECK12-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
11621 // CHECK12-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
11622 // CHECK12-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
11623 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
11624 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
11625 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11626 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
11627 // CHECK12-NEXT:    br label [[DOTEXECUTE:%.*]]
11628 // CHECK12:       .execute:
11629 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
11630 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
11631 // CHECK12-NEXT:    [[CONV1:%.*]] = bitcast i64* [[F_CASTED]] to i32*
11632 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
11633 // CHECK12-NEXT:    [[TMP3:%.*]] = load i64, i64* [[F_CASTED]], align 8
11634 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
11635 // CHECK12-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i64 [[TMP3]]) #[[ATTR3]]
11636 // CHECK12-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
11637 // CHECK12:       .omp.deinit:
11638 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
11639 // CHECK12-NEXT:    br label [[DOTEXIT:%.*]]
11640 // CHECK12:       .exit:
11641 // CHECK12-NEXT:    ret void
11642 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__6
11643 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
11644 // CHECK12-NEXT:  entry:
11645 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11646 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11647 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
11648 // CHECK12-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
11649 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11650 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11651 // CHECK12-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
11652 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11653 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11654 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11655 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11656 // CHECK12-NEXT:    [[K:%.*]] = alloca i32, align 4
11657 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11658 // CHECK12-NEXT:    [[J:%.*]] = alloca i32, align 4
11659 // CHECK12-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
11660 // CHECK12-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
11661 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11662 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11663 // CHECK12-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
11664 // CHECK12-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
11665 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
11666 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
11667 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11668 // CHECK12-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
11669 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11670 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11671 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11672 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11673 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
11674 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
11675 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11676 // CHECK12-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
11677 // CHECK12-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11678 // CHECK12:       cond.true:
11679 // CHECK12-NEXT:    br label [[COND_END:%.*]]
11680 // CHECK12:       cond.false:
11681 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11682 // CHECK12-NEXT:    br label [[COND_END]]
11683 // CHECK12:       cond.end:
11684 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
11685 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11686 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11687 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
11688 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11689 // CHECK12:       omp.inner.for.cond:
11690 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11691 // CHECK12-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
11692 // CHECK12-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11693 // CHECK12:       omp.inner.for.body:
11694 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11695 // CHECK12-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
11696 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11697 // CHECK12-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
11698 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[CONV]], align 8
11699 // CHECK12-NEXT:    [[CONV3:%.*]] = bitcast i64* [[F_CASTED]] to i32*
11700 // CHECK12-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
11701 // CHECK12-NEXT:    [[TMP12:%.*]] = load i64, i64* [[F_CASTED]], align 8
11702 // CHECK12-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
11703 // CHECK12-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to i8*
11704 // CHECK12-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
11705 // CHECK12-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
11706 // CHECK12-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to i8*
11707 // CHECK12-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
11708 // CHECK12-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
11709 // CHECK12-NEXT:    [[TMP18:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
11710 // CHECK12-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
11711 // CHECK12-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
11712 // CHECK12-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP12]] to i8*
11713 // CHECK12-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
11714 // CHECK12-NEXT:    [[TMP21:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
11715 // CHECK12-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x [10 x i32]]*, i64)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP21]], i64 4)
11716 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11717 // CHECK12:       omp.inner.for.inc:
11718 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11719 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11720 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
11721 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
11722 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11723 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11724 // CHECK12-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
11725 // CHECK12-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_LB]], align 4
11726 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11727 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11728 // CHECK12-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
11729 // CHECK12-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_COMB_UB]], align 4
11730 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11731 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP28]], 99
11732 // CHECK12-NEXT:    br i1 [[CMP6]], label [[COND_TRUE7:%.*]], label [[COND_FALSE8:%.*]]
11733 // CHECK12:       cond.true7:
11734 // CHECK12-NEXT:    br label [[COND_END9:%.*]]
11735 // CHECK12:       cond.false8:
11736 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11737 // CHECK12-NEXT:    br label [[COND_END9]]
11738 // CHECK12:       cond.end9:
11739 // CHECK12-NEXT:    [[COND10:%.*]] = phi i32 [ 99, [[COND_TRUE7]] ], [ [[TMP29]], [[COND_FALSE8]] ]
11740 // CHECK12-NEXT:    store i32 [[COND10]], i32* [[DOTOMP_COMB_UB]], align 4
11741 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11742 // CHECK12-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV]], align 4
11743 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11744 // CHECK12:       omp.inner.for.end:
11745 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11746 // CHECK12:       omp.loop.exit:
11747 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
11748 // CHECK12-NEXT:    ret void
11749 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__7
11750 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
11751 // CHECK12-NEXT:  entry:
11752 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11753 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11754 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11755 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11756 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
11757 // CHECK12-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
11758 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11759 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11760 // CHECK12-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
11761 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11762 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11763 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11764 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11765 // CHECK12-NEXT:    [[K:%.*]] = alloca i32, align 4
11766 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11767 // CHECK12-NEXT:    [[J:%.*]] = alloca i32, align 4
11768 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11769 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11770 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11771 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11772 // CHECK12-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
11773 // CHECK12-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
11774 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
11775 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
11776 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11777 // CHECK12-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
11778 // CHECK12-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11779 // CHECK12-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32
11780 // CHECK12-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11781 // CHECK12-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP2]] to i32
11782 // CHECK12-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
11783 // CHECK12-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
11784 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11785 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11786 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11787 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
11788 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11789 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11790 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
11791 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11792 // CHECK12:       omp.inner.for.cond:
11793 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11794 // CHECK12-NEXT:    [[CONV4:%.*]] = sext i32 [[TMP6]] to i64
11795 // CHECK12-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11796 // CHECK12-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV4]], [[TMP7]]
11797 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11798 // CHECK12:       omp.inner.for.body:
11799 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11800 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
11801 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
11802 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11803 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
11804 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11805 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11806 // CHECK12-NEXT:    [[DIV5:%.*]] = sdiv i32 [[TMP10]], 10
11807 // CHECK12-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 10
11808 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL6]]
11809 // CHECK12-NEXT:    [[MUL7:%.*]] = mul nsw i32 [[SUB]], 1
11810 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL7]]
11811 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[J]], align 4
11812 // CHECK12-NEXT:    store i32 10, i32* [[K]], align 4
11813 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
11814 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
11815 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8
11816 // CHECK12-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
11817 // CHECK12-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP11]], [[MUL9]]
11818 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
11819 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD10]], [[TMP14]]
11820 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
11821 // CHECK12-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
11822 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
11823 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
11824 // CHECK12-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP16]] to i64
11825 // CHECK12-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM12]]
11826 // CHECK12-NEXT:    store i32 [[ADD11]], i32* [[ARRAYIDX13]], align 4
11827 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11828 // CHECK12:       omp.body.continue:
11829 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11830 // CHECK12:       omp.inner.for.inc:
11831 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11832 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11833 // CHECK12-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
11834 // CHECK12-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
11835 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
11836 // CHECK12:       omp.inner.for.end:
11837 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11838 // CHECK12:       omp.loop.exit:
11839 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
11840 // CHECK12-NEXT:    ret void
11841 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
11842 // CHECK12-SAME: (i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
11843 // CHECK12-NEXT:  entry:
11844 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11845 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
11846 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
11847 // CHECK12-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
11848 // CHECK12-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
11849 // CHECK12-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
11850 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11851 // CHECK12-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
11852 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11853 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
11854 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11855 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
11856 // CHECK12-NEXT:    br label [[DOTEXECUTE:%.*]]
11857 // CHECK12:       .execute:
11858 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
11859 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
11860 // CHECK12-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11861 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
11862 // CHECK12-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
11863 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
11864 // CHECK12-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
11865 // CHECK12-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
11866 // CHECK12:       .omp.deinit:
11867 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
11868 // CHECK12-NEXT:    br label [[DOTEXIT:%.*]]
11869 // CHECK12:       .exit:
11870 // CHECK12-NEXT:    ret void
11871 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__8
11872 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
11873 // CHECK12-NEXT:  entry:
11874 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11875 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11876 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11877 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
11878 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11879 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11880 // CHECK12-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
11881 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11882 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11883 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
11884 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
11885 // CHECK12-NEXT:    [[J:%.*]] = alloca i32, align 4
11886 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11887 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11888 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11889 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11890 // CHECK12-NEXT:    [[I8:%.*]] = alloca i32, align 4
11891 // CHECK12-NEXT:    [[J9:%.*]] = alloca i32, align 4
11892 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
11893 // CHECK12-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
11894 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11895 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11896 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11897 // CHECK12-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
11898 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11899 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
11900 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
11901 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
11902 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
11903 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11904 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11905 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
11906 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11907 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11908 // CHECK12-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
11909 // CHECK12-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
11910 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
11911 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
11912 // CHECK12-NEXT:    store i32 [[SUB6]], i32* [[DOTCAPTURE_EXPR_3]], align 4
11913 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
11914 // CHECK12-NEXT:    store i32 0, i32* [[J]], align 4
11915 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11916 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11917 // CHECK12-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
11918 // CHECK12:       land.lhs.true:
11919 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11920 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
11921 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
11922 // CHECK12:       omp.precond.then:
11923 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11924 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11925 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11926 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11927 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11928 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
11929 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11930 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11931 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
11932 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11933 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11934 // CHECK12-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11935 // CHECK12-NEXT:    br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11936 // CHECK12:       cond.true:
11937 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11938 // CHECK12-NEXT:    br label [[COND_END:%.*]]
11939 // CHECK12:       cond.false:
11940 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11941 // CHECK12-NEXT:    br label [[COND_END]]
11942 // CHECK12:       cond.end:
11943 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11944 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11945 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11946 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11947 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11948 // CHECK12:       omp.inner.for.cond:
11949 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11950 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11951 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
11952 // CHECK12-NEXT:    [[CMP11:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
11953 // CHECK12-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11954 // CHECK12:       omp.inner.for.body:
11955 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11956 // CHECK12-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
11957 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11958 // CHECK12-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
11959 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV]], align 8
11960 // CHECK12-NEXT:    [[CONV12:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11961 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[CONV12]], align 4
11962 // CHECK12-NEXT:    [[TMP22:%.*]] = load i64, i64* [[N_CASTED]], align 8
11963 // CHECK12-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
11964 // CHECK12-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to i8*
11965 // CHECK12-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
11966 // CHECK12-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
11967 // CHECK12-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to i8*
11968 // CHECK12-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
11969 // CHECK12-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
11970 // CHECK12-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP22]] to i8*
11971 // CHECK12-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
11972 // CHECK12-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
11973 // CHECK12-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
11974 // CHECK12-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
11975 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11976 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
11977 // CHECK12-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
11978 // CHECK12-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i64 4)
11979 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11980 // CHECK12:       omp.inner.for.inc:
11981 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11982 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11983 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
11984 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
11985 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11986 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11987 // CHECK12-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
11988 // CHECK12-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_COMB_LB]], align 4
11989 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11990 // CHECK12-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11991 // CHECK12-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
11992 // CHECK12-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_COMB_UB]], align 4
11993 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11994 // CHECK12-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11995 // CHECK12-NEXT:    [[CMP16:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
11996 // CHECK12-NEXT:    br i1 [[CMP16]], label [[COND_TRUE17:%.*]], label [[COND_FALSE18:%.*]]
11997 // CHECK12:       cond.true17:
11998 // CHECK12-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
11999 // CHECK12-NEXT:    br label [[COND_END19:%.*]]
12000 // CHECK12:       cond.false18:
12001 // CHECK12-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12002 // CHECK12-NEXT:    br label [[COND_END19]]
12003 // CHECK12:       cond.end19:
12004 // CHECK12-NEXT:    [[COND20:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE17]] ], [ [[TMP43]], [[COND_FALSE18]] ]
12005 // CHECK12-NEXT:    store i32 [[COND20]], i32* [[DOTOMP_COMB_UB]], align 4
12006 // CHECK12-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12007 // CHECK12-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
12008 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
12009 // CHECK12:       omp.inner.for.end:
12010 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12011 // CHECK12:       omp.loop.exit:
12012 // CHECK12-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12013 // CHECK12-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
12014 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
12015 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
12016 // CHECK12:       omp.precond.end:
12017 // CHECK12-NEXT:    ret void
12018 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__9
12019 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
12020 // CHECK12-NEXT:  entry:
12021 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12022 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12023 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12024 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12025 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12026 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
12027 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12028 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12029 // CHECK12-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
12030 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12031 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12032 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
12033 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
12034 // CHECK12-NEXT:    [[J:%.*]] = alloca i32, align 4
12035 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12036 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12037 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12038 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12039 // CHECK12-NEXT:    [[I10:%.*]] = alloca i32, align 4
12040 // CHECK12-NEXT:    [[J11:%.*]] = alloca i32, align 4
12041 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12042 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12043 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12044 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12045 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12046 // CHECK12-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
12047 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12048 // CHECK12-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
12049 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
12050 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
12051 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12052 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12053 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12054 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
12055 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12056 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12057 // CHECK12-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
12058 // CHECK12-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
12059 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
12060 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
12061 // CHECK12-NEXT:    store i32 [[SUB6]], i32* [[DOTCAPTURE_EXPR_3]], align 4
12062 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
12063 // CHECK12-NEXT:    store i32 0, i32* [[J]], align 4
12064 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12065 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
12066 // CHECK12-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
12067 // CHECK12:       land.lhs.true:
12068 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12069 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
12070 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
12071 // CHECK12:       omp.precond.then:
12072 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12073 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12074 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12075 // CHECK12-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12076 // CHECK12-NEXT:    [[CONV8:%.*]] = trunc i64 [[TMP8]] to i32
12077 // CHECK12-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12078 // CHECK12-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP9]] to i32
12079 // CHECK12-NEXT:    store i32 [[CONV8]], i32* [[DOTOMP_LB]], align 4
12080 // CHECK12-NEXT:    store i32 [[CONV9]], i32* [[DOTOMP_UB]], align 4
12081 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12082 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12083 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12084 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12085 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12086 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12087 // CHECK12-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
12088 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12089 // CHECK12:       omp.inner.for.cond:
12090 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12091 // CHECK12-NEXT:    [[CONV12:%.*]] = sext i32 [[TMP13]] to i64
12092 // CHECK12-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12093 // CHECK12-NEXT:    [[CMP13:%.*]] = icmp ule i64 [[CONV12]], [[TMP14]]
12094 // CHECK12-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12095 // CHECK12:       omp.inner.for.body:
12096 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12097 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12098 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP16]], 0
12099 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
12100 // CHECK12-NEXT:    [[MUL16:%.*]] = mul nsw i32 1, [[DIV15]]
12101 // CHECK12-NEXT:    [[DIV17:%.*]] = sdiv i32 [[TMP15]], [[MUL16]]
12102 // CHECK12-NEXT:    [[MUL18:%.*]] = mul nsw i32 [[DIV17]], 1
12103 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL18]]
12104 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I10]], align 4
12105 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12106 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12107 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12108 // CHECK12-NEXT:    [[SUB19:%.*]] = sub nsw i32 [[TMP19]], 0
12109 // CHECK12-NEXT:    [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1
12110 // CHECK12-NEXT:    [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]]
12111 // CHECK12-NEXT:    [[DIV22:%.*]] = sdiv i32 [[TMP18]], [[MUL21]]
12112 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12113 // CHECK12-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP20]], 0
12114 // CHECK12-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
12115 // CHECK12-NEXT:    [[MUL25:%.*]] = mul nsw i32 1, [[DIV24]]
12116 // CHECK12-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[DIV22]], [[MUL25]]
12117 // CHECK12-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP17]], [[MUL26]]
12118 // CHECK12-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[SUB27]], 1
12119 // CHECK12-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
12120 // CHECK12-NEXT:    store i32 [[ADD29]], i32* [[J11]], align 4
12121 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I10]], align 4
12122 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J11]], align 4
12123 // CHECK12-NEXT:    [[ADD30:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12124 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I10]], align 4
12125 // CHECK12-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
12126 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
12127 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J11]], align 4
12128 // CHECK12-NEXT:    [[IDXPROM31:%.*]] = sext i32 [[TMP24]] to i64
12129 // CHECK12-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM31]]
12130 // CHECK12-NEXT:    store i32 [[ADD30]], i32* [[ARRAYIDX32]], align 4
12131 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12132 // CHECK12:       omp.body.continue:
12133 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12134 // CHECK12:       omp.inner.for.inc:
12135 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12136 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12137 // CHECK12-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
12138 // CHECK12-NEXT:    store i32 [[ADD33]], i32* [[DOTOMP_IV]], align 4
12139 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
12140 // CHECK12:       omp.inner.for.end:
12141 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12142 // CHECK12:       omp.loop.exit:
12143 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12144 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
12145 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
12146 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
12147 // CHECK12:       omp.precond.end:
12148 // CHECK12-NEXT:    ret void
12149 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
12150 // CHECK12-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
12151 // CHECK12-NEXT:  entry:
12152 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12153 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
12154 // CHECK12-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
12155 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12156 // CHECK12-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
12157 // CHECK12-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
12158 // CHECK12-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
12159 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12160 // CHECK12-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
12161 // CHECK12-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
12162 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12163 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
12164 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
12165 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
12166 // CHECK12-NEXT:    br label [[DOTEXECUTE:%.*]]
12167 // CHECK12:       .execute:
12168 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
12169 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
12170 // CHECK12-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12171 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
12172 // CHECK12-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
12173 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 8
12174 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
12175 // CHECK12-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
12176 // CHECK12-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
12177 // CHECK12:       .omp.deinit:
12178 // CHECK12-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
12179 // CHECK12-NEXT:    br label [[DOTEXIT:%.*]]
12180 // CHECK12:       .exit:
12181 // CHECK12-NEXT:    ret void
12182 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__10
12183 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
12184 // CHECK12-NEXT:  entry:
12185 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12186 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12187 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12188 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
12189 // CHECK12-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
12190 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12191 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12192 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12193 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12194 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
12195 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12196 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12197 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12198 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12199 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
12200 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
12201 // CHECK12-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
12202 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12203 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12204 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12205 // CHECK12-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
12206 // CHECK12-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
12207 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12208 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
12209 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
12210 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
12211 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12212 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
12213 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12214 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12215 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12216 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
12217 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12218 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
12219 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12220 // CHECK12:       omp.precond.then:
12221 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12222 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12223 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
12224 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12225 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12226 // CHECK12-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
12227 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12228 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
12229 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
12230 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12231 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12232 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
12233 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12234 // CHECK12:       cond.true:
12235 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12236 // CHECK12-NEXT:    br label [[COND_END:%.*]]
12237 // CHECK12:       cond.false:
12238 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12239 // CHECK12-NEXT:    br label [[COND_END]]
12240 // CHECK12:       cond.end:
12241 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
12242 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12243 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12244 // CHECK12-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
12245 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12246 // CHECK12:       omp.inner.for.cond:
12247 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12248 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12249 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
12250 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
12251 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12252 // CHECK12:       omp.inner.for.body:
12253 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12254 // CHECK12-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
12255 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12256 // CHECK12-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
12257 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
12258 // CHECK12-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
12259 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
12260 // CHECK12-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
12261 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[V_ADDR]], align 8
12262 // CHECK12-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
12263 // CHECK12-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to i8*
12264 // CHECK12-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
12265 // CHECK12-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
12266 // CHECK12-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to i8*
12267 // CHECK12-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
12268 // CHECK12-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
12269 // CHECK12-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to i8*
12270 // CHECK12-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
12271 // CHECK12-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
12272 // CHECK12-NEXT:    [[TMP28:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
12273 // CHECK12-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
12274 // CHECK12-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
12275 // CHECK12-NEXT:    [[TMP30:%.*]] = bitcast i32* [[TMP20]] to i8*
12276 // CHECK12-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
12277 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12278 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
12279 // CHECK12-NEXT:    [[TMP33:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
12280 // CHECK12-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP33]], i64 5)
12281 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12282 // CHECK12:       omp.inner.for.inc:
12283 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12284 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12285 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
12286 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
12287 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12288 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12289 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
12290 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
12291 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12292 // CHECK12-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12293 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
12294 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
12295 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12296 // CHECK12-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12297 // CHECK12-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
12298 // CHECK12-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
12299 // CHECK12:       cond.true11:
12300 // CHECK12-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12301 // CHECK12-NEXT:    br label [[COND_END13:%.*]]
12302 // CHECK12:       cond.false12:
12303 // CHECK12-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12304 // CHECK12-NEXT:    br label [[COND_END13]]
12305 // CHECK12:       cond.end13:
12306 // CHECK12-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE11]] ], [ [[TMP43]], [[COND_FALSE12]] ]
12307 // CHECK12-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
12308 // CHECK12-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12309 // CHECK12-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
12310 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
12311 // CHECK12:       omp.inner.for.end:
12312 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12313 // CHECK12:       omp.loop.exit:
12314 // CHECK12-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12315 // CHECK12-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
12316 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
12317 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
12318 // CHECK12:       omp.precond.end:
12319 // CHECK12-NEXT:    ret void
12320 // CHECK12-LABEL: define {{[^@]+}}@__omp_outlined__11
12321 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
12322 // CHECK12-NEXT:  entry:
12323 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12324 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12325 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12326 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12327 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12328 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
12329 // CHECK12-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
12330 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12331 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12332 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12333 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12334 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
12335 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12336 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12337 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12338 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12339 // CHECK12-NEXT:    [[I5:%.*]] = alloca i32, align 4
12340 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12341 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12342 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12343 // CHECK12-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12344 // CHECK12-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12345 // CHECK12-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
12346 // CHECK12-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
12347 // CHECK12-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12348 // CHECK12-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
12349 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
12350 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
12351 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12352 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
12353 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12354 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12355 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12356 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
12357 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12358 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
12359 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12360 // CHECK12:       omp.precond.then:
12361 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12362 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12363 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
12364 // CHECK12-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12365 // CHECK12-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
12366 // CHECK12-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12367 // CHECK12-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
12368 // CHECK12-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
12369 // CHECK12-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
12370 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12371 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12372 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12373 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
12374 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12375 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12376 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
12377 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12378 // CHECK12:       omp.inner.for.cond:
12379 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12380 // CHECK12-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
12381 // CHECK12-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12382 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
12383 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12384 // CHECK12:       omp.inner.for.body:
12385 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12386 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
12387 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12388 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
12389 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 8
12390 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I5]], align 4
12391 // CHECK12-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
12392 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 [[IDXPROM]]
12393 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
12394 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I5]], align 4
12395 // CHECK12-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP16]] to i64
12396 // CHECK12-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM8]]
12397 // CHECK12-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX9]], align 4
12398 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12399 // CHECK12:       omp.body.continue:
12400 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12401 // CHECK12:       omp.inner.for.inc:
12402 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12403 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12404 // CHECK12-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
12405 // CHECK12-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
12406 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]]
12407 // CHECK12:       omp.inner.for.end:
12408 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12409 // CHECK12:       omp.loop.exit:
12410 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12411 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
12412 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
12413 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
12414 // CHECK12:       omp.precond.end:
12415 // CHECK12-NEXT:    ret void
12416 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
12417 // CHECK13-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
12418 // CHECK13-NEXT:  entry:
12419 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12420 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
12421 // CHECK13-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
12422 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
12423 // CHECK13-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
12424 // CHECK13-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
12425 // CHECK13-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
12426 // CHECK13-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
12427 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12428 // CHECK13-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
12429 // CHECK13-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
12430 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
12431 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
12432 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
12433 // CHECK13-NEXT:    br label [[DOTEXECUTE:%.*]]
12434 // CHECK13:       .execute:
12435 // CHECK13-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
12436 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
12437 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
12438 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
12439 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
12440 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
12441 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
12442 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
12443 // CHECK13-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
12444 // CHECK13-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
12445 // CHECK13:       .omp.deinit:
12446 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
12447 // CHECK13-NEXT:    br label [[DOTEXIT:%.*]]
12448 // CHECK13:       .exit:
12449 // CHECK13-NEXT:    ret void
12450 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__
12451 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
12452 // CHECK13-NEXT:  entry:
12453 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12454 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12455 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12456 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
12457 // CHECK13-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
12458 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12459 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12460 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12461 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12462 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
12463 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12464 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12465 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12466 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12467 // CHECK13-NEXT:    [[I4:%.*]] = alloca i32, align 4
12468 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
12469 // CHECK13-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
12470 // CHECK13-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
12471 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12472 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12473 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12474 // CHECK13-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
12475 // CHECK13-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
12476 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
12477 // CHECK13-NEXT:    [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
12478 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* @"_openmp_static_kernel$size", align 4
12479 // CHECK13-NEXT:    call void @__kmpc_get_team_static_memory(i16 1, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i32 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
12480 // CHECK13-NEXT:    [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 4
12481 // CHECK13-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i32 0
12482 // CHECK13-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
12483 // CHECK13-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
12484 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
12485 // CHECK13-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
12486 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12487 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
12488 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12489 // CHECK13-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12490 // CHECK13-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12491 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
12492 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12493 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
12494 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12495 // CHECK13:       omp.precond.then:
12496 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12497 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12498 // CHECK13-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
12499 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12500 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12501 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12502 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12503 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
12504 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12505 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12506 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12507 // CHECK13-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12508 // CHECK13:       cond.true:
12509 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12510 // CHECK13-NEXT:    br label [[COND_END:%.*]]
12511 // CHECK13:       cond.false:
12512 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12513 // CHECK13-NEXT:    br label [[COND_END]]
12514 // CHECK13:       cond.end:
12515 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12516 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12517 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12518 // CHECK13-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12519 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12520 // CHECK13:       omp.inner.for.cond:
12521 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12522 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12523 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
12524 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
12525 // CHECK13-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12526 // CHECK13:       omp.inner.for.body:
12527 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12528 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12529 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
12530 // CHECK13-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
12531 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
12532 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[L_ADDR]], align 4
12533 // CHECK13-NEXT:    store i32 [[TMP23]], i32* [[L_CASTED]], align 4
12534 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[L_CASTED]], align 4
12535 // CHECK13-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
12536 // CHECK13-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP19]] to i8*
12537 // CHECK13-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
12538 // CHECK13-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
12539 // CHECK13-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP20]] to i8*
12540 // CHECK13-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
12541 // CHECK13-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
12542 // CHECK13-NEXT:    [[TMP30:%.*]] = inttoptr i32 [[TMP22]] to i8*
12543 // CHECK13-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
12544 // CHECK13-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
12545 // CHECK13-NEXT:    [[TMP32:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
12546 // CHECK13-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 4
12547 // CHECK13-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
12548 // CHECK13-NEXT:    [[TMP34:%.*]] = inttoptr i32 [[TMP24]] to i8*
12549 // CHECK13-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 4
12550 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12551 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
12552 // CHECK13-NEXT:    [[TMP37:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
12553 // CHECK13-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP37]], i32 5)
12554 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12555 // CHECK13:       omp.inner.for.inc:
12556 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12557 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12558 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
12559 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
12560 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12561 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12562 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
12563 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
12564 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12565 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12566 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
12567 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
12568 // CHECK13-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12569 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12570 // CHECK13-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP44]], [[TMP45]]
12571 // CHECK13-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
12572 // CHECK13:       cond.true11:
12573 // CHECK13-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12574 // CHECK13-NEXT:    br label [[COND_END13:%.*]]
12575 // CHECK13:       cond.false12:
12576 // CHECK13-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12577 // CHECK13-NEXT:    br label [[COND_END13]]
12578 // CHECK13:       cond.end13:
12579 // CHECK13-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP46]], [[COND_TRUE11]] ], [ [[TMP47]], [[COND_FALSE12]] ]
12580 // CHECK13-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
12581 // CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12582 // CHECK13-NEXT:    store i32 [[TMP48]], i32* [[DOTOMP_IV]], align 4
12583 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
12584 // CHECK13:       omp.inner.for.end:
12585 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12586 // CHECK13:       omp.loop.exit:
12587 // CHECK13-NEXT:    [[TMP49:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12588 // CHECK13-NEXT:    [[TMP50:%.*]] = load i32, i32* [[TMP49]], align 4
12589 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP50]])
12590 // CHECK13-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12591 // CHECK13-NEXT:    [[TMP52:%.*]] = icmp ne i32 [[TMP51]], 0
12592 // CHECK13-NEXT:    br i1 [[TMP52]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
12593 // CHECK13:       .omp.lastprivate.then:
12594 // CHECK13-NEXT:    [[TMP53:%.*]] = load i32, i32* [[L_ADDR]], align 4
12595 // CHECK13-NEXT:    store i32 [[TMP53]], i32* [[L_ADDR]], align 4
12596 // CHECK13-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
12597 // CHECK13:       .omp.lastprivate.done:
12598 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
12599 // CHECK13:       omp.precond.end:
12600 // CHECK13-NEXT:    [[TMP54:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
12601 // CHECK13-NEXT:    call void @__kmpc_restore_team_static_memory(i16 1, i16 [[TMP54]])
12602 // CHECK13-NEXT:    ret void
12603 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__1
12604 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
12605 // CHECK13-NEXT:  entry:
12606 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12607 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12608 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12609 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12610 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12611 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
12612 // CHECK13-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
12613 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12614 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12615 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12616 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12617 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
12618 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12619 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12620 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12621 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12622 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
12623 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12624 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12625 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12626 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12627 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12628 // CHECK13-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
12629 // CHECK13-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
12630 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
12631 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
12632 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
12633 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12634 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
12635 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12636 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12637 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12638 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
12639 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12640 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
12641 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12642 // CHECK13:       omp.precond.then:
12643 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12644 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12645 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
12646 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12647 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12648 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
12649 // CHECK13-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
12650 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12651 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12652 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12653 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
12654 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
12655 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
12656 // CHECK13:       omp.dispatch.cond:
12657 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12658 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12659 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
12660 // CHECK13-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12661 // CHECK13:       cond.true:
12662 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12663 // CHECK13-NEXT:    br label [[COND_END:%.*]]
12664 // CHECK13:       cond.false:
12665 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12666 // CHECK13-NEXT:    br label [[COND_END]]
12667 // CHECK13:       cond.end:
12668 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
12669 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12670 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12671 // CHECK13-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
12672 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12673 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12674 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
12675 // CHECK13-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12676 // CHECK13:       omp.dispatch.body:
12677 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12678 // CHECK13:       omp.inner.for.cond:
12679 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12680 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12681 // CHECK13-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
12682 // CHECK13-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12683 // CHECK13:       omp.inner.for.body:
12684 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12685 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
12686 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12687 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
12688 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
12689 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
12690 // CHECK13-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
12691 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
12692 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
12693 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12694 // CHECK13:       omp.body.continue:
12695 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12696 // CHECK13:       omp.inner.for.inc:
12697 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12698 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
12699 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
12700 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
12701 // CHECK13:       omp.inner.for.end:
12702 // CHECK13-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
12703 // CHECK13:       omp.dispatch.inc:
12704 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12705 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12706 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
12707 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
12708 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12709 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12710 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
12711 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
12712 // CHECK13-NEXT:    br label [[OMP_DISPATCH_COND]]
12713 // CHECK13:       omp.dispatch.end:
12714 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12715 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
12716 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
12717 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12718 // CHECK13-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
12719 // CHECK13-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
12720 // CHECK13:       .omp.lastprivate.then:
12721 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
12722 // CHECK13-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
12723 // CHECK13-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
12724 // CHECK13:       .omp.lastprivate.done:
12725 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
12726 // CHECK13:       omp.precond.end:
12727 // CHECK13-NEXT:    ret void
12728 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
12729 // CHECK13-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
12730 // CHECK13-NEXT:  entry:
12731 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12732 // CHECK13-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
12733 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
12734 // CHECK13-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
12735 // CHECK13-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
12736 // CHECK13-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
12737 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12738 // CHECK13-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
12739 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
12740 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
12741 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
12742 // CHECK13-NEXT:    br label [[DOTEXECUTE:%.*]]
12743 // CHECK13:       .execute:
12744 // CHECK13-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
12745 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
12746 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
12747 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
12748 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
12749 // CHECK13-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
12750 // CHECK13-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
12751 // CHECK13:       .omp.deinit:
12752 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
12753 // CHECK13-NEXT:    br label [[DOTEXIT:%.*]]
12754 // CHECK13:       .exit:
12755 // CHECK13-NEXT:    ret void
12756 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__2
12757 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
12758 // CHECK13-NEXT:  entry:
12759 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12760 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12761 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12762 // CHECK13-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
12763 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12764 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12765 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12766 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12767 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
12768 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12769 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12770 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12771 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12772 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
12773 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
12774 // CHECK13-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
12775 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12776 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12777 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12778 // CHECK13-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
12779 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
12780 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
12781 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
12782 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12783 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
12784 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12785 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12786 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12787 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
12788 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12789 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
12790 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12791 // CHECK13:       omp.precond.then:
12792 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12793 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12794 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
12795 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12796 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12797 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
12798 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12799 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
12800 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
12801 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12802 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12803 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
12804 // CHECK13-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12805 // CHECK13:       cond.true:
12806 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12807 // CHECK13-NEXT:    br label [[COND_END:%.*]]
12808 // CHECK13:       cond.false:
12809 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12810 // CHECK13-NEXT:    br label [[COND_END]]
12811 // CHECK13:       cond.end:
12812 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
12813 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12814 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12815 // CHECK13-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
12816 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12817 // CHECK13:       omp.inner.for.cond:
12818 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12819 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12820 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
12821 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
12822 // CHECK13-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12823 // CHECK13:       omp.inner.for.body:
12824 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12825 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12826 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
12827 // CHECK13-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
12828 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
12829 // CHECK13-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
12830 // CHECK13-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
12831 // CHECK13-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
12832 // CHECK13-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
12833 // CHECK13-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
12834 // CHECK13-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
12835 // CHECK13-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
12836 // CHECK13-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
12837 // CHECK13-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
12838 // CHECK13-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
12839 // CHECK13-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
12840 // CHECK13-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
12841 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12842 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
12843 // CHECK13-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
12844 // CHECK13-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
12845 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12846 // CHECK13:       omp.inner.for.inc:
12847 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12848 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12849 // CHECK13-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
12850 // CHECK13-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
12851 // CHECK13-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12852 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12853 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
12854 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
12855 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12856 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12857 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
12858 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
12859 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12860 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12861 // CHECK13-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
12862 // CHECK13-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
12863 // CHECK13:       cond.true10:
12864 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12865 // CHECK13-NEXT:    br label [[COND_END12:%.*]]
12866 // CHECK13:       cond.false11:
12867 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12868 // CHECK13-NEXT:    br label [[COND_END12]]
12869 // CHECK13:       cond.end12:
12870 // CHECK13-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
12871 // CHECK13-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
12872 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12873 // CHECK13-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
12874 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
12875 // CHECK13:       omp.inner.for.end:
12876 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12877 // CHECK13:       omp.loop.exit:
12878 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12879 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
12880 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
12881 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
12882 // CHECK13:       omp.precond.end:
12883 // CHECK13-NEXT:    ret void
12884 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__3
12885 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
12886 // CHECK13-NEXT:  entry:
12887 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12888 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12889 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12890 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12891 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12892 // CHECK13-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
12893 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12894 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12895 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12896 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12897 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
12898 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12899 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12900 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12901 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12902 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
12903 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12904 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12905 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12906 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12907 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12908 // CHECK13-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
12909 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
12910 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
12911 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
12912 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12913 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
12914 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12915 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12916 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12917 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
12918 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12919 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
12920 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12921 // CHECK13:       omp.precond.then:
12922 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12923 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12924 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
12925 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12926 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12927 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
12928 // CHECK13-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
12929 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12930 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12931 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12932 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
12933 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12934 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12935 // CHECK13-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
12936 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12937 // CHECK13:       omp.inner.for.cond:
12938 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12939 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12940 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
12941 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12942 // CHECK13:       omp.inner.for.body:
12943 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12944 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
12945 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12946 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
12947 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
12948 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
12949 // CHECK13-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
12950 // CHECK13-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
12951 // CHECK13-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
12952 // CHECK13-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
12953 // CHECK13-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
12954 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12955 // CHECK13:       omp.body.continue:
12956 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12957 // CHECK13:       omp.inner.for.inc:
12958 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12959 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12960 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
12961 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
12962 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
12963 // CHECK13:       omp.inner.for.end:
12964 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12965 // CHECK13:       omp.loop.exit:
12966 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12967 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
12968 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
12969 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
12970 // CHECK13:       omp.precond.end:
12971 // CHECK13-NEXT:    ret void
12972 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
12973 // CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
12974 // CHECK13-NEXT:  entry:
12975 // CHECK13-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12976 // CHECK13-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
12977 // CHECK13-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
12978 // CHECK13-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
12979 // CHECK13-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12980 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12981 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
12982 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
12983 // CHECK13-NEXT:    br label [[DOTEXECUTE:%.*]]
12984 // CHECK13:       .execute:
12985 // CHECK13-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
12986 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
12987 // CHECK13-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
12988 // CHECK13-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
12989 // CHECK13:       .omp.deinit:
12990 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
12991 // CHECK13-NEXT:    br label [[DOTEXIT:%.*]]
12992 // CHECK13:       .exit:
12993 // CHECK13-NEXT:    ret void
12994 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__4
12995 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
12996 // CHECK13-NEXT:  entry:
12997 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12998 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12999 // CHECK13-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
13000 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13001 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13002 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13003 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13004 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13005 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13006 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13007 // CHECK13-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
13008 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13009 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13010 // CHECK13-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
13011 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
13012 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13013 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
13014 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13015 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13016 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13017 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13018 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
13019 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
13020 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13021 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13022 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13023 // CHECK13:       cond.true:
13024 // CHECK13-NEXT:    br label [[COND_END:%.*]]
13025 // CHECK13:       cond.false:
13026 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13027 // CHECK13-NEXT:    br label [[COND_END]]
13028 // CHECK13:       cond.end:
13029 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13030 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13031 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13032 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13033 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13034 // CHECK13:       omp.inner.for.cond:
13035 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13036 // CHECK13-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
13037 // CHECK13-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13038 // CHECK13:       omp.inner.for.body:
13039 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13040 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13041 // CHECK13-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
13042 // CHECK13-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
13043 // CHECK13-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
13044 // CHECK13-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
13045 // CHECK13-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
13046 // CHECK13-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
13047 // CHECK13-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
13048 // CHECK13-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
13049 // CHECK13-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
13050 // CHECK13-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
13051 // CHECK13-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
13052 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13053 // CHECK13:       omp.inner.for.inc:
13054 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13055 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13056 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
13057 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
13058 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13059 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13060 // CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
13061 // CHECK13-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
13062 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13063 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13064 // CHECK13-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
13065 // CHECK13-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
13066 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13067 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
13068 // CHECK13-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
13069 // CHECK13:       cond.true5:
13070 // CHECK13-NEXT:    br label [[COND_END7:%.*]]
13071 // CHECK13:       cond.false6:
13072 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13073 // CHECK13-NEXT:    br label [[COND_END7]]
13074 // CHECK13:       cond.end7:
13075 // CHECK13-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
13076 // CHECK13-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
13077 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13078 // CHECK13-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
13079 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13080 // CHECK13:       omp.inner.for.end:
13081 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13082 // CHECK13:       omp.loop.exit:
13083 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
13084 // CHECK13-NEXT:    ret void
13085 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__5
13086 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
13087 // CHECK13-NEXT:  entry:
13088 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13089 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13090 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13091 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13092 // CHECK13-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
13093 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13094 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13095 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13096 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13097 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13098 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13099 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13100 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13101 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13102 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13103 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13104 // CHECK13-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
13105 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
13106 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13107 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13108 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13109 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13110 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
13111 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
13112 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13113 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13114 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13115 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
13116 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13117 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13118 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13119 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13120 // CHECK13:       omp.inner.for.cond:
13121 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13122 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13123 // CHECK13-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
13124 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13125 // CHECK13:       omp.inner.for.body:
13126 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13127 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
13128 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13129 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
13130 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
13131 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
13132 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
13133 // CHECK13-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
13134 // CHECK13-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
13135 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13136 // CHECK13:       omp.body.continue:
13137 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13138 // CHECK13:       omp.inner.for.inc:
13139 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13140 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13141 // CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
13142 // CHECK13-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
13143 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13144 // CHECK13:       omp.inner.for.end:
13145 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13146 // CHECK13:       omp.loop.exit:
13147 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
13148 // CHECK13-NEXT:    ret void
13149 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
13150 // CHECK13-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
13151 // CHECK13-NEXT:  entry:
13152 // CHECK13-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
13153 // CHECK13-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
13154 // CHECK13-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
13155 // CHECK13-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
13156 // CHECK13-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
13157 // CHECK13-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
13158 // CHECK13-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
13159 // CHECK13-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
13160 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
13161 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13162 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
13163 // CHECK13-NEXT:    br label [[DOTEXECUTE:%.*]]
13164 // CHECK13:       .execute:
13165 // CHECK13-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
13166 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
13167 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
13168 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
13169 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
13170 // CHECK13-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
13171 // CHECK13-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
13172 // CHECK13:       .omp.deinit:
13173 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
13174 // CHECK13-NEXT:    br label [[DOTEXIT:%.*]]
13175 // CHECK13:       .exit:
13176 // CHECK13-NEXT:    ret void
13177 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__6
13178 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
13179 // CHECK13-NEXT:  entry:
13180 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13181 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13182 // CHECK13-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
13183 // CHECK13-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
13184 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13185 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13186 // CHECK13-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
13187 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13188 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13189 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13190 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13191 // CHECK13-NEXT:    [[K:%.*]] = alloca i32, align 4
13192 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13193 // CHECK13-NEXT:    [[J:%.*]] = alloca i32, align 4
13194 // CHECK13-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
13195 // CHECK13-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
13196 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13197 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13198 // CHECK13-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
13199 // CHECK13-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
13200 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
13201 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13202 // CHECK13-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
13203 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13204 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13205 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13206 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13207 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
13208 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
13209 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13210 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
13211 // CHECK13-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13212 // CHECK13:       cond.true:
13213 // CHECK13-NEXT:    br label [[COND_END:%.*]]
13214 // CHECK13:       cond.false:
13215 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13216 // CHECK13-NEXT:    br label [[COND_END]]
13217 // CHECK13:       cond.end:
13218 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13219 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13220 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13221 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13222 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13223 // CHECK13:       omp.inner.for.cond:
13224 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13225 // CHECK13-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
13226 // CHECK13-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13227 // CHECK13:       omp.inner.for.body:
13228 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13229 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13230 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
13231 // CHECK13-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
13232 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
13233 // CHECK13-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
13234 // CHECK13-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
13235 // CHECK13-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
13236 // CHECK13-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
13237 // CHECK13-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
13238 // CHECK13-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
13239 // CHECK13-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
13240 // CHECK13-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
13241 // CHECK13-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
13242 // CHECK13-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
13243 // CHECK13-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
13244 // CHECK13-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
13245 // CHECK13-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
13246 // CHECK13-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
13247 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13248 // CHECK13:       omp.inner.for.inc:
13249 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13250 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13251 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
13252 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
13253 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13254 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13255 // CHECK13-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
13256 // CHECK13-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
13257 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13258 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13259 // CHECK13-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
13260 // CHECK13-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
13261 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13262 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
13263 // CHECK13-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
13264 // CHECK13:       cond.true6:
13265 // CHECK13-NEXT:    br label [[COND_END8:%.*]]
13266 // CHECK13:       cond.false7:
13267 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13268 // CHECK13-NEXT:    br label [[COND_END8]]
13269 // CHECK13:       cond.end8:
13270 // CHECK13-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
13271 // CHECK13-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
13272 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13273 // CHECK13-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
13274 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13275 // CHECK13:       omp.inner.for.end:
13276 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13277 // CHECK13:       omp.loop.exit:
13278 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
13279 // CHECK13-NEXT:    ret void
13280 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__7
13281 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
13282 // CHECK13-NEXT:  entry:
13283 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13284 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13285 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13286 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13287 // CHECK13-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
13288 // CHECK13-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
13289 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13290 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13291 // CHECK13-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
13292 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13293 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13294 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13295 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13296 // CHECK13-NEXT:    [[K:%.*]] = alloca i32, align 4
13297 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13298 // CHECK13-NEXT:    [[J:%.*]] = alloca i32, align 4
13299 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13300 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13301 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13302 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13303 // CHECK13-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
13304 // CHECK13-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
13305 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
13306 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13307 // CHECK13-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
13308 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13309 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13310 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
13311 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
13312 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13313 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13314 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13315 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
13316 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13317 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13318 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
13319 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13320 // CHECK13:       omp.inner.for.cond:
13321 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13322 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13323 // CHECK13-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
13324 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13325 // CHECK13:       omp.inner.for.body:
13326 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13327 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
13328 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
13329 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13330 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
13331 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13332 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13333 // CHECK13-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
13334 // CHECK13-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
13335 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
13336 // CHECK13-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
13337 // CHECK13-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
13338 // CHECK13-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
13339 // CHECK13-NEXT:    store i32 10, i32* [[K]], align 4
13340 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
13341 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
13342 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
13343 // CHECK13-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
13344 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
13345 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
13346 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
13347 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
13348 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
13349 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
13350 // CHECK13-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
13351 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
13352 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13353 // CHECK13:       omp.body.continue:
13354 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13355 // CHECK13:       omp.inner.for.inc:
13356 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13357 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13358 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
13359 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
13360 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13361 // CHECK13:       omp.inner.for.end:
13362 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13363 // CHECK13:       omp.loop.exit:
13364 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
13365 // CHECK13-NEXT:    ret void
13366 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
13367 // CHECK13-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
13368 // CHECK13-NEXT:  entry:
13369 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13370 // CHECK13-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
13371 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
13372 // CHECK13-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
13373 // CHECK13-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
13374 // CHECK13-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
13375 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13376 // CHECK13-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
13377 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
13378 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13379 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
13380 // CHECK13-NEXT:    br label [[DOTEXECUTE:%.*]]
13381 // CHECK13:       .execute:
13382 // CHECK13-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
13383 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
13384 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
13385 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
13386 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
13387 // CHECK13-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
13388 // CHECK13-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
13389 // CHECK13:       .omp.deinit:
13390 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
13391 // CHECK13-NEXT:    br label [[DOTEXIT:%.*]]
13392 // CHECK13:       .exit:
13393 // CHECK13-NEXT:    ret void
13394 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__8
13395 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
13396 // CHECK13-NEXT:  entry:
13397 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13398 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13399 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13400 // CHECK13-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
13401 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
13402 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13403 // CHECK13-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
13404 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13405 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13406 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
13407 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13408 // CHECK13-NEXT:    [[J:%.*]] = alloca i32, align 4
13409 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
13410 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
13411 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
13412 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13413 // CHECK13-NEXT:    [[I9:%.*]] = alloca i32, align 4
13414 // CHECK13-NEXT:    [[J10:%.*]] = alloca i32, align 4
13415 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
13416 // CHECK13-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
13417 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13418 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13419 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13420 // CHECK13-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
13421 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
13422 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
13423 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
13424 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
13425 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13426 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13427 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
13428 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13429 // CHECK13-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
13430 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13431 // CHECK13-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
13432 // CHECK13-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
13433 // CHECK13-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
13434 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
13435 // CHECK13-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
13436 // CHECK13-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
13437 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
13438 // CHECK13-NEXT:    store i32 0, i32* [[J]], align 4
13439 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13440 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
13441 // CHECK13-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
13442 // CHECK13:       land.lhs.true:
13443 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13444 // CHECK13-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
13445 // CHECK13-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
13446 // CHECK13:       omp.precond.then:
13447 // CHECK13-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
13448 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
13449 // CHECK13-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
13450 // CHECK13-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
13451 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13452 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13453 // CHECK13-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
13454 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13455 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13456 // CHECK13-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
13457 // CHECK13-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
13458 // CHECK13-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
13459 // CHECK13-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
13460 // CHECK13-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13461 // CHECK13:       cond.true:
13462 // CHECK13-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
13463 // CHECK13-NEXT:    br label [[COND_END:%.*]]
13464 // CHECK13:       cond.false:
13465 // CHECK13-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
13466 // CHECK13-NEXT:    br label [[COND_END]]
13467 // CHECK13:       cond.end:
13468 // CHECK13-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
13469 // CHECK13-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
13470 // CHECK13-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
13471 // CHECK13-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
13472 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13473 // CHECK13:       omp.inner.for.cond:
13474 // CHECK13-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
13475 // CHECK13-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
13476 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
13477 // CHECK13-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
13478 // CHECK13-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13479 // CHECK13:       omp.inner.for.body:
13480 // CHECK13-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
13481 // CHECK13-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
13482 // CHECK13-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
13483 // CHECK13-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
13484 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
13485 // CHECK13-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
13486 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
13487 // CHECK13-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
13488 // CHECK13-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
13489 // CHECK13-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
13490 // CHECK13-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
13491 // CHECK13-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
13492 // CHECK13-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
13493 // CHECK13-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
13494 // CHECK13-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
13495 // CHECK13-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
13496 // CHECK13-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
13497 // CHECK13-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
13498 // CHECK13-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
13499 // CHECK13-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13500 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
13501 // CHECK13-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
13502 // CHECK13-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
13503 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13504 // CHECK13:       omp.inner.for.inc:
13505 // CHECK13-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
13506 // CHECK13-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
13507 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
13508 // CHECK13-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
13509 // CHECK13-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
13510 // CHECK13-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
13511 // CHECK13-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
13512 // CHECK13-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
13513 // CHECK13-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
13514 // CHECK13-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
13515 // CHECK13-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
13516 // CHECK13-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
13517 // CHECK13-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
13518 // CHECK13-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
13519 // CHECK13-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
13520 // CHECK13-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
13521 // CHECK13:       cond.true18:
13522 // CHECK13-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
13523 // CHECK13-NEXT:    br label [[COND_END20:%.*]]
13524 // CHECK13:       cond.false19:
13525 // CHECK13-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
13526 // CHECK13-NEXT:    br label [[COND_END20]]
13527 // CHECK13:       cond.end20:
13528 // CHECK13-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
13529 // CHECK13-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
13530 // CHECK13-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
13531 // CHECK13-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
13532 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13533 // CHECK13:       omp.inner.for.end:
13534 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13535 // CHECK13:       omp.loop.exit:
13536 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13537 // CHECK13-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
13538 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
13539 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
13540 // CHECK13:       omp.precond.end:
13541 // CHECK13-NEXT:    ret void
13542 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__9
13543 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
13544 // CHECK13-NEXT:  entry:
13545 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13546 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13547 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13548 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13549 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13550 // CHECK13-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
13551 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
13552 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13553 // CHECK13-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
13554 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13555 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13556 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
13557 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13558 // CHECK13-NEXT:    [[J:%.*]] = alloca i32, align 4
13559 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
13560 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
13561 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
13562 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13563 // CHECK13-NEXT:    [[I11:%.*]] = alloca i32, align 4
13564 // CHECK13-NEXT:    [[J12:%.*]] = alloca i32, align 4
13565 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13566 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13567 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13568 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13569 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13570 // CHECK13-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
13571 // CHECK13-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
13572 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
13573 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
13574 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
13575 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13576 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13577 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
13578 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13579 // CHECK13-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
13580 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13581 // CHECK13-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
13582 // CHECK13-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
13583 // CHECK13-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
13584 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
13585 // CHECK13-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
13586 // CHECK13-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
13587 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
13588 // CHECK13-NEXT:    store i32 0, i32* [[J]], align 4
13589 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13590 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
13591 // CHECK13-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
13592 // CHECK13:       land.lhs.true:
13593 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13594 // CHECK13-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
13595 // CHECK13-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
13596 // CHECK13:       omp.precond.then:
13597 // CHECK13-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
13598 // CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
13599 // CHECK13-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
13600 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13601 // CHECK13-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
13602 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13603 // CHECK13-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
13604 // CHECK13-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
13605 // CHECK13-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
13606 // CHECK13-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
13607 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13608 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13609 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13610 // CHECK13-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
13611 // CHECK13-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
13612 // CHECK13-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
13613 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13614 // CHECK13:       omp.inner.for.cond:
13615 // CHECK13-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
13616 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13617 // CHECK13-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
13618 // CHECK13-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
13619 // CHECK13-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13620 // CHECK13:       omp.inner.for.body:
13621 // CHECK13-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
13622 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13623 // CHECK13-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
13624 // CHECK13-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
13625 // CHECK13-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
13626 // CHECK13-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
13627 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
13628 // CHECK13-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
13629 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
13630 // CHECK13-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
13631 // CHECK13-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
13632 // CHECK13-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
13633 // CHECK13-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
13634 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13635 // CHECK13-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
13636 // CHECK13-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
13637 // CHECK13-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
13638 // CHECK13-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
13639 // CHECK13-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
13640 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13641 // CHECK13-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
13642 // CHECK13-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
13643 // CHECK13-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
13644 // CHECK13-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
13645 // CHECK13-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
13646 // CHECK13-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
13647 // CHECK13-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
13648 // CHECK13-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
13649 // CHECK13-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
13650 // CHECK13-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
13651 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
13652 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
13653 // CHECK13-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
13654 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
13655 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
13656 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
13657 // CHECK13-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
13658 // CHECK13-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
13659 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13660 // CHECK13:       omp.body.continue:
13661 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13662 // CHECK13:       omp.inner.for.inc:
13663 // CHECK13-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
13664 // CHECK13-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
13665 // CHECK13-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
13666 // CHECK13-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
13667 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13668 // CHECK13:       omp.inner.for.end:
13669 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13670 // CHECK13:       omp.loop.exit:
13671 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13672 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
13673 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
13674 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
13675 // CHECK13:       omp.precond.end:
13676 // CHECK13-NEXT:    ret void
13677 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
13678 // CHECK13-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
13679 // CHECK13-NEXT:  entry:
13680 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13681 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
13682 // CHECK13-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
13683 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
13684 // CHECK13-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
13685 // CHECK13-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
13686 // CHECK13-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
13687 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13688 // CHECK13-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
13689 // CHECK13-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
13690 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
13691 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13692 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
13693 // CHECK13-NEXT:    br label [[DOTEXECUTE:%.*]]
13694 // CHECK13:       .execute:
13695 // CHECK13-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
13696 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
13697 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
13698 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
13699 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
13700 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
13701 // CHECK13-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
13702 // CHECK13-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
13703 // CHECK13:       .omp.deinit:
13704 // CHECK13-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
13705 // CHECK13-NEXT:    br label [[DOTEXIT:%.*]]
13706 // CHECK13:       .exit:
13707 // CHECK13-NEXT:    ret void
13708 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__10
13709 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
13710 // CHECK13-NEXT:  entry:
13711 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13712 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13713 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13714 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
13715 // CHECK13-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
13716 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13717 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13718 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13719 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13720 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13721 // CHECK13-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13722 // CHECK13-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13723 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13724 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13725 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
13726 // CHECK13-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
13727 // CHECK13-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
13728 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13729 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13730 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13731 // CHECK13-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
13732 // CHECK13-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
13733 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
13734 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
13735 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
13736 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13737 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
13738 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13739 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13740 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13741 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
13742 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13743 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
13744 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13745 // CHECK13:       omp.precond.then:
13746 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13747 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13748 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
13749 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13750 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13751 // CHECK13-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13752 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13753 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
13754 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
13755 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13756 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13757 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
13758 // CHECK13-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13759 // CHECK13:       cond.true:
13760 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13761 // CHECK13-NEXT:    br label [[COND_END:%.*]]
13762 // CHECK13:       cond.false:
13763 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13764 // CHECK13-NEXT:    br label [[COND_END]]
13765 // CHECK13:       cond.end:
13766 // CHECK13-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
13767 // CHECK13-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13768 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13769 // CHECK13-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
13770 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13771 // CHECK13:       omp.inner.for.cond:
13772 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13773 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13774 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
13775 // CHECK13-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
13776 // CHECK13-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13777 // CHECK13:       omp.inner.for.body:
13778 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13779 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13780 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
13781 // CHECK13-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
13782 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
13783 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
13784 // CHECK13-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
13785 // CHECK13-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
13786 // CHECK13-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
13787 // CHECK13-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
13788 // CHECK13-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
13789 // CHECK13-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
13790 // CHECK13-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
13791 // CHECK13-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
13792 // CHECK13-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
13793 // CHECK13-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
13794 // CHECK13-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
13795 // CHECK13-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
13796 // CHECK13-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
13797 // CHECK13-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
13798 // CHECK13-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
13799 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13800 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
13801 // CHECK13-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
13802 // CHECK13-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
13803 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13804 // CHECK13:       omp.inner.for.inc:
13805 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13806 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13807 // CHECK13-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
13808 // CHECK13-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
13809 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13810 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13811 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
13812 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
13813 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13814 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13815 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
13816 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
13817 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13818 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13819 // CHECK13-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
13820 // CHECK13-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
13821 // CHECK13:       cond.true10:
13822 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13823 // CHECK13-NEXT:    br label [[COND_END12:%.*]]
13824 // CHECK13:       cond.false11:
13825 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13826 // CHECK13-NEXT:    br label [[COND_END12]]
13827 // CHECK13:       cond.end12:
13828 // CHECK13-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
13829 // CHECK13-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
13830 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13831 // CHECK13-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
13832 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13833 // CHECK13:       omp.inner.for.end:
13834 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13835 // CHECK13:       omp.loop.exit:
13836 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13837 // CHECK13-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
13838 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
13839 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
13840 // CHECK13:       omp.precond.end:
13841 // CHECK13-NEXT:    ret void
13842 // CHECK13-LABEL: define {{[^@]+}}@__omp_outlined__11
13843 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
13844 // CHECK13-NEXT:  entry:
13845 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13846 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13847 // CHECK13-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13848 // CHECK13-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13849 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13850 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
13851 // CHECK13-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
13852 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13853 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13854 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13855 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13856 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
13857 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13858 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13859 // CHECK13-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13860 // CHECK13-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13861 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
13862 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13863 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13864 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13865 // CHECK13-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13866 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13867 // CHECK13-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
13868 // CHECK13-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
13869 // CHECK13-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
13870 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
13871 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
13872 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13873 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
13874 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13875 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13876 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13877 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
13878 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13879 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
13880 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13881 // CHECK13:       omp.precond.then:
13882 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13883 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13884 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
13885 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13886 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13887 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
13888 // CHECK13-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
13889 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13890 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13891 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13892 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
13893 // CHECK13-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13894 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13895 // CHECK13-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
13896 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13897 // CHECK13:       omp.inner.for.cond:
13898 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13899 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13900 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
13901 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13902 // CHECK13:       omp.inner.for.body:
13903 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13904 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
13905 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13906 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
13907 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
13908 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
13909 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
13910 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
13911 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
13912 // CHECK13-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
13913 // CHECK13-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
13914 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13915 // CHECK13:       omp.body.continue:
13916 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13917 // CHECK13:       omp.inner.for.inc:
13918 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13919 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13920 // CHECK13-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
13921 // CHECK13-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
13922 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]]
13923 // CHECK13:       omp.inner.for.end:
13924 // CHECK13-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13925 // CHECK13:       omp.loop.exit:
13926 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13927 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
13928 // CHECK13-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
13929 // CHECK13-NEXT:    br label [[OMP_PRECOND_END]]
13930 // CHECK13:       omp.precond.end:
13931 // CHECK13-NEXT:    ret void
13932 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
13933 // CHECK14-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
13934 // CHECK14-NEXT:  entry:
13935 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13936 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
13937 // CHECK14-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
13938 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
13939 // CHECK14-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
13940 // CHECK14-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
13941 // CHECK14-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
13942 // CHECK14-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
13943 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13944 // CHECK14-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
13945 // CHECK14-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
13946 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
13947 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
13948 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
13949 // CHECK14-NEXT:    br label [[DOTEXECUTE:%.*]]
13950 // CHECK14:       .execute:
13951 // CHECK14-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
13952 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
13953 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
13954 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
13955 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
13956 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
13957 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
13958 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
13959 // CHECK14-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
13960 // CHECK14-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
13961 // CHECK14:       .omp.deinit:
13962 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
13963 // CHECK14-NEXT:    br label [[DOTEXIT:%.*]]
13964 // CHECK14:       .exit:
13965 // CHECK14-NEXT:    ret void
13966 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__
13967 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
13968 // CHECK14-NEXT:  entry:
13969 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13970 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13971 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13972 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
13973 // CHECK14-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
13974 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13975 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13976 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13977 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13978 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
13979 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13980 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13981 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13982 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13983 // CHECK14-NEXT:    [[I4:%.*]] = alloca i32, align 4
13984 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
13985 // CHECK14-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
13986 // CHECK14-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
13987 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13988 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13989 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13990 // CHECK14-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
13991 // CHECK14-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
13992 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
13993 // CHECK14-NEXT:    [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
13994 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* @"_openmp_static_kernel$size", align 4
13995 // CHECK14-NEXT:    call void @__kmpc_get_team_static_memory(i16 1, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i32 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
13996 // CHECK14-NEXT:    [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 4
13997 // CHECK14-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i32 0
13998 // CHECK14-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
13999 // CHECK14-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
14000 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
14001 // CHECK14-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
14002 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14003 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
14004 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14005 // CHECK14-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14006 // CHECK14-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14007 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
14008 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14009 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
14010 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14011 // CHECK14:       omp.precond.then:
14012 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14013 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14014 // CHECK14-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
14015 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14016 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14017 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14018 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14019 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
14020 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14021 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14022 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14023 // CHECK14-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14024 // CHECK14:       cond.true:
14025 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14026 // CHECK14-NEXT:    br label [[COND_END:%.*]]
14027 // CHECK14:       cond.false:
14028 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14029 // CHECK14-NEXT:    br label [[COND_END]]
14030 // CHECK14:       cond.end:
14031 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14032 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14033 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14034 // CHECK14-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14035 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14036 // CHECK14:       omp.inner.for.cond:
14037 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14038 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14039 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
14040 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
14041 // CHECK14-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14042 // CHECK14:       omp.inner.for.body:
14043 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14044 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14045 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
14046 // CHECK14-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
14047 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
14048 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[L_ADDR]], align 4
14049 // CHECK14-NEXT:    store i32 [[TMP23]], i32* [[L_CASTED]], align 4
14050 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[L_CASTED]], align 4
14051 // CHECK14-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
14052 // CHECK14-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP19]] to i8*
14053 // CHECK14-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
14054 // CHECK14-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
14055 // CHECK14-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP20]] to i8*
14056 // CHECK14-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
14057 // CHECK14-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
14058 // CHECK14-NEXT:    [[TMP30:%.*]] = inttoptr i32 [[TMP22]] to i8*
14059 // CHECK14-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
14060 // CHECK14-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
14061 // CHECK14-NEXT:    [[TMP32:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
14062 // CHECK14-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 4
14063 // CHECK14-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
14064 // CHECK14-NEXT:    [[TMP34:%.*]] = inttoptr i32 [[TMP24]] to i8*
14065 // CHECK14-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 4
14066 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14067 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
14068 // CHECK14-NEXT:    [[TMP37:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
14069 // CHECK14-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP37]], i32 5)
14070 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14071 // CHECK14:       omp.inner.for.inc:
14072 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14073 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14074 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
14075 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
14076 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14077 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14078 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
14079 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
14080 // CHECK14-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14081 // CHECK14-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14082 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
14083 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
14084 // CHECK14-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14085 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14086 // CHECK14-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP44]], [[TMP45]]
14087 // CHECK14-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
14088 // CHECK14:       cond.true11:
14089 // CHECK14-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14090 // CHECK14-NEXT:    br label [[COND_END13:%.*]]
14091 // CHECK14:       cond.false12:
14092 // CHECK14-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14093 // CHECK14-NEXT:    br label [[COND_END13]]
14094 // CHECK14:       cond.end13:
14095 // CHECK14-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP46]], [[COND_TRUE11]] ], [ [[TMP47]], [[COND_FALSE12]] ]
14096 // CHECK14-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
14097 // CHECK14-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14098 // CHECK14-NEXT:    store i32 [[TMP48]], i32* [[DOTOMP_IV]], align 4
14099 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14100 // CHECK14:       omp.inner.for.end:
14101 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14102 // CHECK14:       omp.loop.exit:
14103 // CHECK14-NEXT:    [[TMP49:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14104 // CHECK14-NEXT:    [[TMP50:%.*]] = load i32, i32* [[TMP49]], align 4
14105 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP50]])
14106 // CHECK14-NEXT:    [[TMP51:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14107 // CHECK14-NEXT:    [[TMP52:%.*]] = icmp ne i32 [[TMP51]], 0
14108 // CHECK14-NEXT:    br i1 [[TMP52]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
14109 // CHECK14:       .omp.lastprivate.then:
14110 // CHECK14-NEXT:    [[TMP53:%.*]] = load i32, i32* [[L_ADDR]], align 4
14111 // CHECK14-NEXT:    store i32 [[TMP53]], i32* [[L_ADDR]], align 4
14112 // CHECK14-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
14113 // CHECK14:       .omp.lastprivate.done:
14114 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
14115 // CHECK14:       omp.precond.end:
14116 // CHECK14-NEXT:    [[TMP54:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
14117 // CHECK14-NEXT:    call void @__kmpc_restore_team_static_memory(i16 1, i16 [[TMP54]])
14118 // CHECK14-NEXT:    ret void
14119 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__1
14120 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
14121 // CHECK14-NEXT:  entry:
14122 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14123 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14124 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14125 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14126 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14127 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
14128 // CHECK14-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
14129 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14130 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14131 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14132 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14133 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14134 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14135 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14136 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14137 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14138 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
14139 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14140 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14141 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14142 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14143 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14144 // CHECK14-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
14145 // CHECK14-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
14146 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
14147 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
14148 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
14149 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14150 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
14151 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14152 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14153 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14154 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
14155 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14156 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
14157 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14158 // CHECK14:       omp.precond.then:
14159 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14160 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14161 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
14162 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14163 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14164 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
14165 // CHECK14-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
14166 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14167 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14168 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14169 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
14170 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
14171 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
14172 // CHECK14:       omp.dispatch.cond:
14173 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14174 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14175 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
14176 // CHECK14-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14177 // CHECK14:       cond.true:
14178 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14179 // CHECK14-NEXT:    br label [[COND_END:%.*]]
14180 // CHECK14:       cond.false:
14181 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14182 // CHECK14-NEXT:    br label [[COND_END]]
14183 // CHECK14:       cond.end:
14184 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
14185 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14186 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14187 // CHECK14-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
14188 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14189 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14190 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
14191 // CHECK14-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14192 // CHECK14:       omp.dispatch.body:
14193 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14194 // CHECK14:       omp.inner.for.cond:
14195 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14196 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14197 // CHECK14-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
14198 // CHECK14-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14199 // CHECK14:       omp.inner.for.body:
14200 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14201 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
14202 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14203 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
14204 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
14205 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
14206 // CHECK14-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
14207 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
14208 // CHECK14-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
14209 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14210 // CHECK14:       omp.body.continue:
14211 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14212 // CHECK14:       omp.inner.for.inc:
14213 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14214 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
14215 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
14216 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14217 // CHECK14:       omp.inner.for.end:
14218 // CHECK14-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
14219 // CHECK14:       omp.dispatch.inc:
14220 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14221 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14222 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
14223 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
14224 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14225 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14226 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
14227 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
14228 // CHECK14-NEXT:    br label [[OMP_DISPATCH_COND]]
14229 // CHECK14:       omp.dispatch.end:
14230 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14231 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
14232 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
14233 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14234 // CHECK14-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
14235 // CHECK14-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
14236 // CHECK14:       .omp.lastprivate.then:
14237 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
14238 // CHECK14-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
14239 // CHECK14-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
14240 // CHECK14:       .omp.lastprivate.done:
14241 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
14242 // CHECK14:       omp.precond.end:
14243 // CHECK14-NEXT:    ret void
14244 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
14245 // CHECK14-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
14246 // CHECK14-NEXT:  entry:
14247 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14248 // CHECK14-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
14249 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14250 // CHECK14-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
14251 // CHECK14-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
14252 // CHECK14-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
14253 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14254 // CHECK14-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
14255 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
14256 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14257 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
14258 // CHECK14-NEXT:    br label [[DOTEXECUTE:%.*]]
14259 // CHECK14:       .execute:
14260 // CHECK14-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
14261 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14262 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
14263 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
14264 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
14265 // CHECK14-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
14266 // CHECK14-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
14267 // CHECK14:       .omp.deinit:
14268 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
14269 // CHECK14-NEXT:    br label [[DOTEXIT:%.*]]
14270 // CHECK14:       .exit:
14271 // CHECK14-NEXT:    ret void
14272 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__2
14273 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
14274 // CHECK14-NEXT:  entry:
14275 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14276 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14277 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14278 // CHECK14-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
14279 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14280 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14281 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14282 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14283 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14284 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14285 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14286 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14287 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14288 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
14289 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14290 // CHECK14-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
14291 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14292 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14293 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14294 // CHECK14-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
14295 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
14296 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
14297 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
14298 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14299 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
14300 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14301 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14302 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14303 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
14304 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14305 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
14306 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14307 // CHECK14:       omp.precond.then:
14308 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14309 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14310 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
14311 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14312 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14313 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14314 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14315 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
14316 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
14317 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14318 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14319 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
14320 // CHECK14-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14321 // CHECK14:       cond.true:
14322 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14323 // CHECK14-NEXT:    br label [[COND_END:%.*]]
14324 // CHECK14:       cond.false:
14325 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14326 // CHECK14-NEXT:    br label [[COND_END]]
14327 // CHECK14:       cond.end:
14328 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
14329 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14330 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14331 // CHECK14-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
14332 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14333 // CHECK14:       omp.inner.for.cond:
14334 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14335 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14336 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
14337 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
14338 // CHECK14-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14339 // CHECK14:       omp.inner.for.body:
14340 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14341 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14342 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
14343 // CHECK14-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
14344 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
14345 // CHECK14-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
14346 // CHECK14-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
14347 // CHECK14-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
14348 // CHECK14-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
14349 // CHECK14-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
14350 // CHECK14-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
14351 // CHECK14-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
14352 // CHECK14-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
14353 // CHECK14-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
14354 // CHECK14-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
14355 // CHECK14-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
14356 // CHECK14-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
14357 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14358 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
14359 // CHECK14-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
14360 // CHECK14-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
14361 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14362 // CHECK14:       omp.inner.for.inc:
14363 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14364 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14365 // CHECK14-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
14366 // CHECK14-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
14367 // CHECK14-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14368 // CHECK14-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14369 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
14370 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
14371 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14372 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14373 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
14374 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
14375 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14376 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14377 // CHECK14-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
14378 // CHECK14-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
14379 // CHECK14:       cond.true10:
14380 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14381 // CHECK14-NEXT:    br label [[COND_END12:%.*]]
14382 // CHECK14:       cond.false11:
14383 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14384 // CHECK14-NEXT:    br label [[COND_END12]]
14385 // CHECK14:       cond.end12:
14386 // CHECK14-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
14387 // CHECK14-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
14388 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14389 // CHECK14-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
14390 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14391 // CHECK14:       omp.inner.for.end:
14392 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14393 // CHECK14:       omp.loop.exit:
14394 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14395 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
14396 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
14397 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
14398 // CHECK14:       omp.precond.end:
14399 // CHECK14-NEXT:    ret void
14400 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__3
14401 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
14402 // CHECK14-NEXT:  entry:
14403 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14404 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14405 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14406 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14407 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14408 // CHECK14-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
14409 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14410 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14411 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14412 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14413 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14414 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14415 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14416 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14417 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14418 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
14419 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14420 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14421 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14422 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14423 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14424 // CHECK14-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
14425 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
14426 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
14427 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
14428 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14429 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
14430 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14431 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14432 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14433 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
14434 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14435 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
14436 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14437 // CHECK14:       omp.precond.then:
14438 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14439 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14440 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
14441 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14442 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14443 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
14444 // CHECK14-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
14445 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14446 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14447 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14448 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
14449 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14450 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14451 // CHECK14-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
14452 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14453 // CHECK14:       omp.inner.for.cond:
14454 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14455 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14456 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
14457 // CHECK14-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14458 // CHECK14:       omp.inner.for.body:
14459 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14460 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
14461 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14462 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
14463 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
14464 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
14465 // CHECK14-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
14466 // CHECK14-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
14467 // CHECK14-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
14468 // CHECK14-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
14469 // CHECK14-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
14470 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14471 // CHECK14:       omp.body.continue:
14472 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14473 // CHECK14:       omp.inner.for.inc:
14474 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14475 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14476 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
14477 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
14478 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14479 // CHECK14:       omp.inner.for.end:
14480 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14481 // CHECK14:       omp.loop.exit:
14482 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14483 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
14484 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
14485 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
14486 // CHECK14:       omp.precond.end:
14487 // CHECK14-NEXT:    ret void
14488 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
14489 // CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
14490 // CHECK14-NEXT:  entry:
14491 // CHECK14-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
14492 // CHECK14-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
14493 // CHECK14-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
14494 // CHECK14-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
14495 // CHECK14-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
14496 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
14497 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14498 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
14499 // CHECK14-NEXT:    br label [[DOTEXECUTE:%.*]]
14500 // CHECK14:       .execute:
14501 // CHECK14-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
14502 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
14503 // CHECK14-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
14504 // CHECK14-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
14505 // CHECK14:       .omp.deinit:
14506 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
14507 // CHECK14-NEXT:    br label [[DOTEXIT:%.*]]
14508 // CHECK14:       .exit:
14509 // CHECK14-NEXT:    ret void
14510 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__4
14511 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
14512 // CHECK14-NEXT:  entry:
14513 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14514 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14515 // CHECK14-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
14516 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14517 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14518 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14519 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14520 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14521 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14522 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14523 // CHECK14-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
14524 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14525 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14526 // CHECK14-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
14527 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
14528 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14529 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
14530 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14531 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14532 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14533 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14534 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
14535 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
14536 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14537 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
14538 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14539 // CHECK14:       cond.true:
14540 // CHECK14-NEXT:    br label [[COND_END:%.*]]
14541 // CHECK14:       cond.false:
14542 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14543 // CHECK14-NEXT:    br label [[COND_END]]
14544 // CHECK14:       cond.end:
14545 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
14546 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14547 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14548 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
14549 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14550 // CHECK14:       omp.inner.for.cond:
14551 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14552 // CHECK14-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
14553 // CHECK14-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14554 // CHECK14:       omp.inner.for.body:
14555 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14556 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14557 // CHECK14-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
14558 // CHECK14-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
14559 // CHECK14-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
14560 // CHECK14-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
14561 // CHECK14-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
14562 // CHECK14-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
14563 // CHECK14-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
14564 // CHECK14-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
14565 // CHECK14-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
14566 // CHECK14-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
14567 // CHECK14-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
14568 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14569 // CHECK14:       omp.inner.for.inc:
14570 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14571 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14572 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
14573 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
14574 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14575 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14576 // CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
14577 // CHECK14-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
14578 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14579 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14580 // CHECK14-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
14581 // CHECK14-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
14582 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14583 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
14584 // CHECK14-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
14585 // CHECK14:       cond.true5:
14586 // CHECK14-NEXT:    br label [[COND_END7:%.*]]
14587 // CHECK14:       cond.false6:
14588 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14589 // CHECK14-NEXT:    br label [[COND_END7]]
14590 // CHECK14:       cond.end7:
14591 // CHECK14-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
14592 // CHECK14-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
14593 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14594 // CHECK14-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
14595 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14596 // CHECK14:       omp.inner.for.end:
14597 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14598 // CHECK14:       omp.loop.exit:
14599 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
14600 // CHECK14-NEXT:    ret void
14601 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__5
14602 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
14603 // CHECK14-NEXT:  entry:
14604 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14605 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14606 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14607 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14608 // CHECK14-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
14609 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14610 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14611 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14612 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14613 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14614 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14615 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14616 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14617 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14618 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14619 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14620 // CHECK14-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
14621 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
14622 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14623 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
14624 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14625 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14626 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
14627 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
14628 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14629 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14630 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14631 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
14632 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14633 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14634 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
14635 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14636 // CHECK14:       omp.inner.for.cond:
14637 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14638 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14639 // CHECK14-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
14640 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14641 // CHECK14:       omp.inner.for.body:
14642 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14643 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
14644 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14645 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
14646 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
14647 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
14648 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
14649 // CHECK14-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
14650 // CHECK14-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
14651 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14652 // CHECK14:       omp.body.continue:
14653 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14654 // CHECK14:       omp.inner.for.inc:
14655 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14656 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14657 // CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
14658 // CHECK14-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
14659 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14660 // CHECK14:       omp.inner.for.end:
14661 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14662 // CHECK14:       omp.loop.exit:
14663 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
14664 // CHECK14-NEXT:    ret void
14665 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
14666 // CHECK14-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
14667 // CHECK14-NEXT:  entry:
14668 // CHECK14-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
14669 // CHECK14-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
14670 // CHECK14-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
14671 // CHECK14-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
14672 // CHECK14-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
14673 // CHECK14-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
14674 // CHECK14-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
14675 // CHECK14-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
14676 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
14677 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14678 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
14679 // CHECK14-NEXT:    br label [[DOTEXECUTE:%.*]]
14680 // CHECK14:       .execute:
14681 // CHECK14-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
14682 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
14683 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
14684 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
14685 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
14686 // CHECK14-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
14687 // CHECK14-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
14688 // CHECK14:       .omp.deinit:
14689 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
14690 // CHECK14-NEXT:    br label [[DOTEXIT:%.*]]
14691 // CHECK14:       .exit:
14692 // CHECK14-NEXT:    ret void
14693 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__6
14694 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
14695 // CHECK14-NEXT:  entry:
14696 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14697 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14698 // CHECK14-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
14699 // CHECK14-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
14700 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14701 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14702 // CHECK14-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
14703 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14704 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14705 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14706 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14707 // CHECK14-NEXT:    [[K:%.*]] = alloca i32, align 4
14708 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14709 // CHECK14-NEXT:    [[J:%.*]] = alloca i32, align 4
14710 // CHECK14-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
14711 // CHECK14-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
14712 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14713 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14714 // CHECK14-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
14715 // CHECK14-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
14716 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
14717 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14718 // CHECK14-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
14719 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14720 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14721 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14722 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14723 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
14724 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
14725 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14726 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
14727 // CHECK14-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14728 // CHECK14:       cond.true:
14729 // CHECK14-NEXT:    br label [[COND_END:%.*]]
14730 // CHECK14:       cond.false:
14731 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14732 // CHECK14-NEXT:    br label [[COND_END]]
14733 // CHECK14:       cond.end:
14734 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
14735 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14736 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14737 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
14738 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14739 // CHECK14:       omp.inner.for.cond:
14740 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14741 // CHECK14-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
14742 // CHECK14-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14743 // CHECK14:       omp.inner.for.body:
14744 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14745 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14746 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
14747 // CHECK14-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
14748 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
14749 // CHECK14-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
14750 // CHECK14-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
14751 // CHECK14-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
14752 // CHECK14-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
14753 // CHECK14-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
14754 // CHECK14-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
14755 // CHECK14-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
14756 // CHECK14-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
14757 // CHECK14-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
14758 // CHECK14-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
14759 // CHECK14-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
14760 // CHECK14-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
14761 // CHECK14-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
14762 // CHECK14-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
14763 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14764 // CHECK14:       omp.inner.for.inc:
14765 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14766 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14767 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
14768 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
14769 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14770 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14771 // CHECK14-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
14772 // CHECK14-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
14773 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14774 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14775 // CHECK14-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
14776 // CHECK14-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
14777 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14778 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
14779 // CHECK14-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
14780 // CHECK14:       cond.true6:
14781 // CHECK14-NEXT:    br label [[COND_END8:%.*]]
14782 // CHECK14:       cond.false7:
14783 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14784 // CHECK14-NEXT:    br label [[COND_END8]]
14785 // CHECK14:       cond.end8:
14786 // CHECK14-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
14787 // CHECK14-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
14788 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14789 // CHECK14-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
14790 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14791 // CHECK14:       omp.inner.for.end:
14792 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14793 // CHECK14:       omp.loop.exit:
14794 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
14795 // CHECK14-NEXT:    ret void
14796 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__7
14797 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
14798 // CHECK14-NEXT:  entry:
14799 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14800 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14801 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14802 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14803 // CHECK14-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
14804 // CHECK14-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
14805 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14806 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14807 // CHECK14-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
14808 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14809 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14810 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14811 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14812 // CHECK14-NEXT:    [[K:%.*]] = alloca i32, align 4
14813 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14814 // CHECK14-NEXT:    [[J:%.*]] = alloca i32, align 4
14815 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14816 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14817 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14818 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14819 // CHECK14-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
14820 // CHECK14-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
14821 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
14822 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14823 // CHECK14-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
14824 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14825 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14826 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
14827 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
14828 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14829 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14830 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14831 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
14832 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14833 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14834 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
14835 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14836 // CHECK14:       omp.inner.for.cond:
14837 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14838 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14839 // CHECK14-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
14840 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14841 // CHECK14:       omp.inner.for.body:
14842 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14843 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
14844 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
14845 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14846 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
14847 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14848 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14849 // CHECK14-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
14850 // CHECK14-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
14851 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
14852 // CHECK14-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
14853 // CHECK14-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
14854 // CHECK14-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
14855 // CHECK14-NEXT:    store i32 10, i32* [[K]], align 4
14856 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
14857 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
14858 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
14859 // CHECK14-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
14860 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
14861 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
14862 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
14863 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
14864 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
14865 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
14866 // CHECK14-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
14867 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
14868 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14869 // CHECK14:       omp.body.continue:
14870 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14871 // CHECK14:       omp.inner.for.inc:
14872 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14873 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14874 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
14875 // CHECK14-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
14876 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
14877 // CHECK14:       omp.inner.for.end:
14878 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14879 // CHECK14:       omp.loop.exit:
14880 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
14881 // CHECK14-NEXT:    ret void
14882 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
14883 // CHECK14-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
14884 // CHECK14-NEXT:  entry:
14885 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14886 // CHECK14-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
14887 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14888 // CHECK14-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
14889 // CHECK14-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
14890 // CHECK14-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
14891 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14892 // CHECK14-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
14893 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
14894 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14895 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
14896 // CHECK14-NEXT:    br label [[DOTEXECUTE:%.*]]
14897 // CHECK14:       .execute:
14898 // CHECK14-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
14899 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14900 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
14901 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
14902 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
14903 // CHECK14-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
14904 // CHECK14-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
14905 // CHECK14:       .omp.deinit:
14906 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
14907 // CHECK14-NEXT:    br label [[DOTEXIT:%.*]]
14908 // CHECK14:       .exit:
14909 // CHECK14-NEXT:    ret void
14910 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__8
14911 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
14912 // CHECK14-NEXT:  entry:
14913 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14914 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14915 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14916 // CHECK14-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
14917 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
14918 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14919 // CHECK14-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
14920 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14921 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14922 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
14923 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
14924 // CHECK14-NEXT:    [[J:%.*]] = alloca i32, align 4
14925 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
14926 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
14927 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
14928 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14929 // CHECK14-NEXT:    [[I9:%.*]] = alloca i32, align 4
14930 // CHECK14-NEXT:    [[J10:%.*]] = alloca i32, align 4
14931 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14932 // CHECK14-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
14933 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14934 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14935 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14936 // CHECK14-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
14937 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
14938 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
14939 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
14940 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14941 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14942 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14943 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
14944 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14945 // CHECK14-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
14946 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14947 // CHECK14-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
14948 // CHECK14-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
14949 // CHECK14-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
14950 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
14951 // CHECK14-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
14952 // CHECK14-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
14953 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
14954 // CHECK14-NEXT:    store i32 0, i32* [[J]], align 4
14955 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14956 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14957 // CHECK14-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
14958 // CHECK14:       land.lhs.true:
14959 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14960 // CHECK14-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
14961 // CHECK14-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
14962 // CHECK14:       omp.precond.then:
14963 // CHECK14-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
14964 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
14965 // CHECK14-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
14966 // CHECK14-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
14967 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14968 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
14969 // CHECK14-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
14970 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14971 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14972 // CHECK14-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
14973 // CHECK14-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
14974 // CHECK14-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
14975 // CHECK14-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
14976 // CHECK14-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14977 // CHECK14:       cond.true:
14978 // CHECK14-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
14979 // CHECK14-NEXT:    br label [[COND_END:%.*]]
14980 // CHECK14:       cond.false:
14981 // CHECK14-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
14982 // CHECK14-NEXT:    br label [[COND_END]]
14983 // CHECK14:       cond.end:
14984 // CHECK14-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14985 // CHECK14-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
14986 // CHECK14-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
14987 // CHECK14-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
14988 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14989 // CHECK14:       omp.inner.for.cond:
14990 // CHECK14-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
14991 // CHECK14-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
14992 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
14993 // CHECK14-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
14994 // CHECK14-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14995 // CHECK14:       omp.inner.for.body:
14996 // CHECK14-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
14997 // CHECK14-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
14998 // CHECK14-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
14999 // CHECK14-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
15000 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
15001 // CHECK14-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
15002 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
15003 // CHECK14-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
15004 // CHECK14-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
15005 // CHECK14-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
15006 // CHECK14-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
15007 // CHECK14-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
15008 // CHECK14-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
15009 // CHECK14-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
15010 // CHECK14-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
15011 // CHECK14-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
15012 // CHECK14-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
15013 // CHECK14-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
15014 // CHECK14-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
15015 // CHECK14-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15016 // CHECK14-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
15017 // CHECK14-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
15018 // CHECK14-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
15019 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15020 // CHECK14:       omp.inner.for.inc:
15021 // CHECK14-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15022 // CHECK14-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
15023 // CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
15024 // CHECK14-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
15025 // CHECK14-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
15026 // CHECK14-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
15027 // CHECK14-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
15028 // CHECK14-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
15029 // CHECK14-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
15030 // CHECK14-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
15031 // CHECK14-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
15032 // CHECK14-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
15033 // CHECK14-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
15034 // CHECK14-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
15035 // CHECK14-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
15036 // CHECK14-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
15037 // CHECK14:       cond.true18:
15038 // CHECK14-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
15039 // CHECK14-NEXT:    br label [[COND_END20:%.*]]
15040 // CHECK14:       cond.false19:
15041 // CHECK14-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
15042 // CHECK14-NEXT:    br label [[COND_END20]]
15043 // CHECK14:       cond.end20:
15044 // CHECK14-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
15045 // CHECK14-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
15046 // CHECK14-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
15047 // CHECK14-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
15048 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
15049 // CHECK14:       omp.inner.for.end:
15050 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15051 // CHECK14:       omp.loop.exit:
15052 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15053 // CHECK14-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
15054 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
15055 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
15056 // CHECK14:       omp.precond.end:
15057 // CHECK14-NEXT:    ret void
15058 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__9
15059 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
15060 // CHECK14-NEXT:  entry:
15061 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15062 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15063 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15064 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15065 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15066 // CHECK14-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
15067 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
15068 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15069 // CHECK14-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
15070 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15071 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15072 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
15073 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
15074 // CHECK14-NEXT:    [[J:%.*]] = alloca i32, align 4
15075 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
15076 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
15077 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
15078 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15079 // CHECK14-NEXT:    [[I11:%.*]] = alloca i32, align 4
15080 // CHECK14-NEXT:    [[J12:%.*]] = alloca i32, align 4
15081 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15082 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15083 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15084 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15085 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15086 // CHECK14-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
15087 // CHECK14-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
15088 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15089 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
15090 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15091 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15092 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15093 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
15094 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15095 // CHECK14-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
15096 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15097 // CHECK14-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
15098 // CHECK14-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
15099 // CHECK14-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
15100 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
15101 // CHECK14-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
15102 // CHECK14-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
15103 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
15104 // CHECK14-NEXT:    store i32 0, i32* [[J]], align 4
15105 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15106 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
15107 // CHECK14-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
15108 // CHECK14:       land.lhs.true:
15109 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15110 // CHECK14-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
15111 // CHECK14-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
15112 // CHECK14:       omp.precond.then:
15113 // CHECK14-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
15114 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
15115 // CHECK14-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
15116 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15117 // CHECK14-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
15118 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15119 // CHECK14-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
15120 // CHECK14-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
15121 // CHECK14-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
15122 // CHECK14-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
15123 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15124 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15125 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
15126 // CHECK14-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
15127 // CHECK14-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
15128 // CHECK14-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
15129 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15130 // CHECK14:       omp.inner.for.cond:
15131 // CHECK14-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15132 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15133 // CHECK14-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
15134 // CHECK14-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
15135 // CHECK14-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15136 // CHECK14:       omp.inner.for.body:
15137 // CHECK14-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15138 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15139 // CHECK14-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
15140 // CHECK14-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
15141 // CHECK14-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
15142 // CHECK14-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
15143 // CHECK14-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
15144 // CHECK14-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
15145 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
15146 // CHECK14-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
15147 // CHECK14-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
15148 // CHECK14-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15149 // CHECK14-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15150 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15151 // CHECK14-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
15152 // CHECK14-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
15153 // CHECK14-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
15154 // CHECK14-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
15155 // CHECK14-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
15156 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15157 // CHECK14-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
15158 // CHECK14-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
15159 // CHECK14-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
15160 // CHECK14-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
15161 // CHECK14-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
15162 // CHECK14-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
15163 // CHECK14-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
15164 // CHECK14-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
15165 // CHECK14-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
15166 // CHECK14-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
15167 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
15168 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
15169 // CHECK14-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
15170 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
15171 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
15172 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
15173 // CHECK14-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
15174 // CHECK14-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
15175 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15176 // CHECK14:       omp.body.continue:
15177 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15178 // CHECK14:       omp.inner.for.inc:
15179 // CHECK14-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15180 // CHECK14-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
15181 // CHECK14-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
15182 // CHECK14-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
15183 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
15184 // CHECK14:       omp.inner.for.end:
15185 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15186 // CHECK14:       omp.loop.exit:
15187 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15188 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
15189 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
15190 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
15191 // CHECK14:       omp.precond.end:
15192 // CHECK14-NEXT:    ret void
15193 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
15194 // CHECK14-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
15195 // CHECK14-NEXT:  entry:
15196 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15197 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
15198 // CHECK14-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
15199 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15200 // CHECK14-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
15201 // CHECK14-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
15202 // CHECK14-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
15203 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15204 // CHECK14-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
15205 // CHECK14-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
15206 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
15207 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
15208 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
15209 // CHECK14-NEXT:    br label [[DOTEXECUTE:%.*]]
15210 // CHECK14:       .execute:
15211 // CHECK14-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
15212 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15213 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
15214 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
15215 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
15216 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
15217 // CHECK14-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
15218 // CHECK14-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
15219 // CHECK14:       .omp.deinit:
15220 // CHECK14-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
15221 // CHECK14-NEXT:    br label [[DOTEXIT:%.*]]
15222 // CHECK14:       .exit:
15223 // CHECK14-NEXT:    ret void
15224 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__10
15225 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
15226 // CHECK14-NEXT:  entry:
15227 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15228 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15229 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15230 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
15231 // CHECK14-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
15232 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15233 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15234 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15235 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15236 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
15237 // CHECK14-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15238 // CHECK14-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15239 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15240 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15241 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
15242 // CHECK14-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15243 // CHECK14-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
15244 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15245 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15246 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15247 // CHECK14-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
15248 // CHECK14-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
15249 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
15250 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15251 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
15252 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15253 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
15254 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15255 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15256 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15257 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
15258 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15259 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15260 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15261 // CHECK14:       omp.precond.then:
15262 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15263 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15264 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
15265 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15266 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15267 // CHECK14-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
15268 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15269 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
15270 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
15271 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15272 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15273 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
15274 // CHECK14-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15275 // CHECK14:       cond.true:
15276 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15277 // CHECK14-NEXT:    br label [[COND_END:%.*]]
15278 // CHECK14:       cond.false:
15279 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15280 // CHECK14-NEXT:    br label [[COND_END]]
15281 // CHECK14:       cond.end:
15282 // CHECK14-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
15283 // CHECK14-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15284 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15285 // CHECK14-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
15286 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15287 // CHECK14:       omp.inner.for.cond:
15288 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15289 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15290 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
15291 // CHECK14-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
15292 // CHECK14-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15293 // CHECK14:       omp.inner.for.body:
15294 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15295 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15296 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
15297 // CHECK14-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
15298 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
15299 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
15300 // CHECK14-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
15301 // CHECK14-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
15302 // CHECK14-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
15303 // CHECK14-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
15304 // CHECK14-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
15305 // CHECK14-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
15306 // CHECK14-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
15307 // CHECK14-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
15308 // CHECK14-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
15309 // CHECK14-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
15310 // CHECK14-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
15311 // CHECK14-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
15312 // CHECK14-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
15313 // CHECK14-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
15314 // CHECK14-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
15315 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15316 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
15317 // CHECK14-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
15318 // CHECK14-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
15319 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15320 // CHECK14:       omp.inner.for.inc:
15321 // CHECK14-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15322 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15323 // CHECK14-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
15324 // CHECK14-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
15325 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15326 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15327 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
15328 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
15329 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15330 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15331 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
15332 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
15333 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15334 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15335 // CHECK14-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
15336 // CHECK14-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
15337 // CHECK14:       cond.true10:
15338 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15339 // CHECK14-NEXT:    br label [[COND_END12:%.*]]
15340 // CHECK14:       cond.false11:
15341 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15342 // CHECK14-NEXT:    br label [[COND_END12]]
15343 // CHECK14:       cond.end12:
15344 // CHECK14-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
15345 // CHECK14-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
15346 // CHECK14-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15347 // CHECK14-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
15348 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
15349 // CHECK14:       omp.inner.for.end:
15350 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15351 // CHECK14:       omp.loop.exit:
15352 // CHECK14-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15353 // CHECK14-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
15354 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
15355 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
15356 // CHECK14:       omp.precond.end:
15357 // CHECK14-NEXT:    ret void
15358 // CHECK14-LABEL: define {{[^@]+}}@__omp_outlined__11
15359 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
15360 // CHECK14-NEXT:  entry:
15361 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15362 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15363 // CHECK14-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15364 // CHECK14-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15365 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15366 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
15367 // CHECK14-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
15368 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15369 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15370 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15371 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15372 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
15373 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15374 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15375 // CHECK14-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15376 // CHECK14-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15377 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
15378 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15379 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15380 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15381 // CHECK14-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15382 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15383 // CHECK14-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
15384 // CHECK14-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
15385 // CHECK14-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
15386 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15387 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
15388 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15389 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
15390 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15391 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15392 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15393 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
15394 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15395 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15396 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15397 // CHECK14:       omp.precond.then:
15398 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15399 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15400 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
15401 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15402 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15403 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
15404 // CHECK14-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
15405 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15406 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15407 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15408 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
15409 // CHECK14-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15410 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15411 // CHECK14-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
15412 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15413 // CHECK14:       omp.inner.for.cond:
15414 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15415 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15416 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
15417 // CHECK14-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15418 // CHECK14:       omp.inner.for.body:
15419 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15420 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
15421 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15422 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
15423 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
15424 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
15425 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
15426 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
15427 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
15428 // CHECK14-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
15429 // CHECK14-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
15430 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15431 // CHECK14:       omp.body.continue:
15432 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15433 // CHECK14:       omp.inner.for.inc:
15434 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15435 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15436 // CHECK14-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
15437 // CHECK14-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
15438 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]]
15439 // CHECK14:       omp.inner.for.end:
15440 // CHECK14-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15441 // CHECK14:       omp.loop.exit:
15442 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15443 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
15444 // CHECK14-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
15445 // CHECK14-NEXT:    br label [[OMP_PRECOND_END]]
15446 // CHECK14:       omp.precond.end:
15447 // CHECK14-NEXT:    ret void
15448 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
15449 // CHECK15-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
15450 // CHECK15-NEXT:  entry:
15451 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15452 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
15453 // CHECK15-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
15454 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15455 // CHECK15-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
15456 // CHECK15-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
15457 // CHECK15-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
15458 // CHECK15-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
15459 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15460 // CHECK15-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
15461 // CHECK15-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
15462 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
15463 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
15464 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
15465 // CHECK15-NEXT:    br label [[DOTEXECUTE:%.*]]
15466 // CHECK15:       .execute:
15467 // CHECK15-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
15468 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15469 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
15470 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
15471 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
15472 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
15473 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
15474 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
15475 // CHECK15-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
15476 // CHECK15-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
15477 // CHECK15:       .omp.deinit:
15478 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
15479 // CHECK15-NEXT:    br label [[DOTEXIT:%.*]]
15480 // CHECK15:       .exit:
15481 // CHECK15-NEXT:    ret void
15482 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__
15483 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
15484 // CHECK15-NEXT:  entry:
15485 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15486 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15487 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15488 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
15489 // CHECK15-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
15490 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15491 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15492 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15493 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15494 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15495 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15496 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15497 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15498 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15499 // CHECK15-NEXT:    [[I4:%.*]] = alloca i32, align 4
15500 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15501 // CHECK15-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
15502 // CHECK15-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
15503 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15504 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15505 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15506 // CHECK15-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
15507 // CHECK15-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
15508 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
15509 // CHECK15-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i32 4, i16 1)
15510 // CHECK15-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
15511 // CHECK15-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
15512 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
15513 // CHECK15-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
15514 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15515 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
15516 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15517 // CHECK15-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15518 // CHECK15-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15519 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15520 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15521 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
15522 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15523 // CHECK15:       omp.precond.then:
15524 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15525 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15526 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_COMB_UB]], align 4
15527 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15528 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15529 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15530 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
15531 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
15532 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15533 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15534 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
15535 // CHECK15-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15536 // CHECK15:       cond.true:
15537 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15538 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15539 // CHECK15:       cond.false:
15540 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15541 // CHECK15-NEXT:    br label [[COND_END]]
15542 // CHECK15:       cond.end:
15543 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
15544 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15545 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15546 // CHECK15-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
15547 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15548 // CHECK15:       omp.inner.for.cond:
15549 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15550 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15551 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
15552 // CHECK15-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
15553 // CHECK15-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15554 // CHECK15:       omp.inner.for.body:
15555 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15556 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15557 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
15558 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4
15559 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
15560 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[L_ADDR]], align 4
15561 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[L_CASTED]], align 4
15562 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[L_CASTED]], align 4
15563 // CHECK15-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
15564 // CHECK15-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP16]] to i8*
15565 // CHECK15-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
15566 // CHECK15-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
15567 // CHECK15-NEXT:    [[TMP25:%.*]] = inttoptr i32 [[TMP17]] to i8*
15568 // CHECK15-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
15569 // CHECK15-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
15570 // CHECK15-NEXT:    [[TMP27:%.*]] = inttoptr i32 [[TMP19]] to i8*
15571 // CHECK15-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 4
15572 // CHECK15-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
15573 // CHECK15-NEXT:    [[TMP29:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
15574 // CHECK15-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 4
15575 // CHECK15-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
15576 // CHECK15-NEXT:    [[TMP31:%.*]] = inttoptr i32 [[TMP21]] to i8*
15577 // CHECK15-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 4
15578 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15579 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
15580 // CHECK15-NEXT:    [[TMP34:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
15581 // CHECK15-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP34]], i32 5)
15582 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15583 // CHECK15:       omp.inner.for.inc:
15584 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15585 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15586 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
15587 // CHECK15-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
15588 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15589 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15590 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
15591 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
15592 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15593 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15594 // CHECK15-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
15595 // CHECK15-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
15596 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15597 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15598 // CHECK15-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP41]], [[TMP42]]
15599 // CHECK15-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
15600 // CHECK15:       cond.true11:
15601 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15602 // CHECK15-NEXT:    br label [[COND_END13:%.*]]
15603 // CHECK15:       cond.false12:
15604 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15605 // CHECK15-NEXT:    br label [[COND_END13]]
15606 // CHECK15:       cond.end13:
15607 // CHECK15-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP43]], [[COND_TRUE11]] ], [ [[TMP44]], [[COND_FALSE12]] ]
15608 // CHECK15-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
15609 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15610 // CHECK15-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV]], align 4
15611 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
15612 // CHECK15:       omp.inner.for.end:
15613 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15614 // CHECK15:       omp.loop.exit:
15615 // CHECK15-NEXT:    [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15616 // CHECK15-NEXT:    [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
15617 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP47]])
15618 // CHECK15-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15619 // CHECK15-NEXT:    [[TMP49:%.*]] = icmp ne i32 [[TMP48]], 0
15620 // CHECK15-NEXT:    br i1 [[TMP49]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
15621 // CHECK15:       .omp.lastprivate.then:
15622 // CHECK15-NEXT:    [[TMP50:%.*]] = load i32, i32* [[L_ADDR]], align 4
15623 // CHECK15-NEXT:    store i32 [[TMP50]], i32* [[L_ADDR]], align 4
15624 // CHECK15-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
15625 // CHECK15:       .omp.lastprivate.done:
15626 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15627 // CHECK15:       omp.precond.end:
15628 // CHECK15-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
15629 // CHECK15-NEXT:    ret void
15630 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__1
15631 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
15632 // CHECK15-NEXT:  entry:
15633 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15634 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15635 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15636 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15637 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15638 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
15639 // CHECK15-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
15640 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15641 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15642 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15643 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15644 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15645 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15646 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15647 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15648 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15649 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
15650 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15651 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15652 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15653 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15654 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15655 // CHECK15-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
15656 // CHECK15-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
15657 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
15658 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15659 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
15660 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15661 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
15662 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15663 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15664 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15665 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15666 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15667 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15668 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15669 // CHECK15:       omp.precond.then:
15670 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15671 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15672 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
15673 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15674 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15675 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
15676 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
15677 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15678 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15679 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15680 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
15681 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
15682 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15683 // CHECK15:       omp.dispatch.cond:
15684 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15685 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15686 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
15687 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15688 // CHECK15:       cond.true:
15689 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15690 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15691 // CHECK15:       cond.false:
15692 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15693 // CHECK15-NEXT:    br label [[COND_END]]
15694 // CHECK15:       cond.end:
15695 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
15696 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
15697 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15698 // CHECK15-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
15699 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15700 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15701 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
15702 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15703 // CHECK15:       omp.dispatch.body:
15704 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15705 // CHECK15:       omp.inner.for.cond:
15706 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15707 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15708 // CHECK15-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
15709 // CHECK15-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15710 // CHECK15:       omp.inner.for.body:
15711 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15712 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
15713 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15714 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
15715 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
15716 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
15717 // CHECK15-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
15718 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
15719 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
15720 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15721 // CHECK15:       omp.body.continue:
15722 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15723 // CHECK15:       omp.inner.for.inc:
15724 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15725 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
15726 // CHECK15-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
15727 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
15728 // CHECK15:       omp.inner.for.end:
15729 // CHECK15-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15730 // CHECK15:       omp.dispatch.inc:
15731 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15732 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15733 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
15734 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
15735 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15736 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15737 // CHECK15-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
15738 // CHECK15-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
15739 // CHECK15-NEXT:    br label [[OMP_DISPATCH_COND]]
15740 // CHECK15:       omp.dispatch.end:
15741 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15742 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
15743 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
15744 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15745 // CHECK15-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
15746 // CHECK15-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
15747 // CHECK15:       .omp.lastprivate.then:
15748 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
15749 // CHECK15-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
15750 // CHECK15-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
15751 // CHECK15:       .omp.lastprivate.done:
15752 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15753 // CHECK15:       omp.precond.end:
15754 // CHECK15-NEXT:    ret void
15755 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
15756 // CHECK15-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
15757 // CHECK15-NEXT:  entry:
15758 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15759 // CHECK15-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
15760 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15761 // CHECK15-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
15762 // CHECK15-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
15763 // CHECK15-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
15764 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15765 // CHECK15-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
15766 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
15767 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
15768 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
15769 // CHECK15-NEXT:    br label [[DOTEXECUTE:%.*]]
15770 // CHECK15:       .execute:
15771 // CHECK15-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
15772 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15773 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
15774 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
15775 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
15776 // CHECK15-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
15777 // CHECK15-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
15778 // CHECK15:       .omp.deinit:
15779 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
15780 // CHECK15-NEXT:    br label [[DOTEXIT:%.*]]
15781 // CHECK15:       .exit:
15782 // CHECK15-NEXT:    ret void
15783 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__2
15784 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
15785 // CHECK15-NEXT:  entry:
15786 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15787 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15788 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15789 // CHECK15-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
15790 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15791 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15792 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15793 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15794 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15795 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15796 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15797 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15798 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15799 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
15800 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
15801 // CHECK15-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
15802 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15803 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15804 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15805 // CHECK15-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
15806 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
15807 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15808 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
15809 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15810 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
15811 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15812 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15813 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15814 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15815 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15816 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15817 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15818 // CHECK15:       omp.precond.then:
15819 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15820 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15821 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
15822 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15823 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15824 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
15825 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15826 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
15827 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
15828 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15829 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15830 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
15831 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15832 // CHECK15:       cond.true:
15833 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15834 // CHECK15-NEXT:    br label [[COND_END:%.*]]
15835 // CHECK15:       cond.false:
15836 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15837 // CHECK15-NEXT:    br label [[COND_END]]
15838 // CHECK15:       cond.end:
15839 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
15840 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15841 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15842 // CHECK15-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
15843 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15844 // CHECK15:       omp.inner.for.cond:
15845 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15846 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15847 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
15848 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
15849 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15850 // CHECK15:       omp.inner.for.body:
15851 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15852 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15853 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
15854 // CHECK15-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
15855 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
15856 // CHECK15-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
15857 // CHECK15-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
15858 // CHECK15-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
15859 // CHECK15-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
15860 // CHECK15-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
15861 // CHECK15-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
15862 // CHECK15-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
15863 // CHECK15-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
15864 // CHECK15-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
15865 // CHECK15-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
15866 // CHECK15-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
15867 // CHECK15-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
15868 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15869 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
15870 // CHECK15-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
15871 // CHECK15-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
15872 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15873 // CHECK15:       omp.inner.for.inc:
15874 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15875 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15876 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
15877 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
15878 // CHECK15-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15879 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15880 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
15881 // CHECK15-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
15882 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15883 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15884 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
15885 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
15886 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15887 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15888 // CHECK15-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
15889 // CHECK15-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
15890 // CHECK15:       cond.true10:
15891 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15892 // CHECK15-NEXT:    br label [[COND_END12:%.*]]
15893 // CHECK15:       cond.false11:
15894 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15895 // CHECK15-NEXT:    br label [[COND_END12]]
15896 // CHECK15:       cond.end12:
15897 // CHECK15-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
15898 // CHECK15-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
15899 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15900 // CHECK15-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
15901 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
15902 // CHECK15:       omp.inner.for.end:
15903 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15904 // CHECK15:       omp.loop.exit:
15905 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15906 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
15907 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
15908 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15909 // CHECK15:       omp.precond.end:
15910 // CHECK15-NEXT:    ret void
15911 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__3
15912 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
15913 // CHECK15-NEXT:  entry:
15914 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
15915 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
15916 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15917 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15918 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15919 // CHECK15-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
15920 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15921 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15922 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15923 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15924 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15925 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15926 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15927 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15928 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15929 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
15930 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
15931 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
15932 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15933 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15934 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15935 // CHECK15-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
15936 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
15937 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15938 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
15939 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15940 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
15941 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15942 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15943 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15944 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15945 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15946 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15947 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15948 // CHECK15:       omp.precond.then:
15949 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15950 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15951 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
15952 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
15953 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15954 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
15955 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
15956 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15957 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15958 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15959 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
15960 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15961 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15962 // CHECK15-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
15963 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15964 // CHECK15:       omp.inner.for.cond:
15965 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15966 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
15967 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
15968 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15969 // CHECK15:       omp.inner.for.body:
15970 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15971 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
15972 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15973 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
15974 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
15975 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
15976 // CHECK15-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
15977 // CHECK15-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
15978 // CHECK15-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
15979 // CHECK15-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
15980 // CHECK15-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
15981 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15982 // CHECK15:       omp.body.continue:
15983 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15984 // CHECK15:       omp.inner.for.inc:
15985 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15986 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15987 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
15988 // CHECK15-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
15989 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
15990 // CHECK15:       omp.inner.for.end:
15991 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15992 // CHECK15:       omp.loop.exit:
15993 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
15994 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
15995 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
15996 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
15997 // CHECK15:       omp.precond.end:
15998 // CHECK15-NEXT:    ret void
15999 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
16000 // CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
16001 // CHECK15-NEXT:  entry:
16002 // CHECK15-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
16003 // CHECK15-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
16004 // CHECK15-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
16005 // CHECK15-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
16006 // CHECK15-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
16007 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
16008 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16009 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
16010 // CHECK15-NEXT:    br label [[DOTEXECUTE:%.*]]
16011 // CHECK15:       .execute:
16012 // CHECK15-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
16013 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
16014 // CHECK15-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
16015 // CHECK15-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
16016 // CHECK15:       .omp.deinit:
16017 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
16018 // CHECK15-NEXT:    br label [[DOTEXIT:%.*]]
16019 // CHECK15:       .exit:
16020 // CHECK15-NEXT:    ret void
16021 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__4
16022 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
16023 // CHECK15-NEXT:  entry:
16024 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16025 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16026 // CHECK15-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
16027 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16028 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16029 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16030 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16031 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16032 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16033 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16034 // CHECK15-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
16035 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16036 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16037 // CHECK15-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
16038 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
16039 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16040 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
16041 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16042 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16043 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16044 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16045 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
16046 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
16047 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16048 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
16049 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16050 // CHECK15:       cond.true:
16051 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16052 // CHECK15:       cond.false:
16053 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16054 // CHECK15-NEXT:    br label [[COND_END]]
16055 // CHECK15:       cond.end:
16056 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16057 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16058 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16059 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16060 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16061 // CHECK15:       omp.inner.for.cond:
16062 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16063 // CHECK15-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
16064 // CHECK15-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16065 // CHECK15:       omp.inner.for.body:
16066 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16067 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16068 // CHECK15-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
16069 // CHECK15-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
16070 // CHECK15-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
16071 // CHECK15-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
16072 // CHECK15-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
16073 // CHECK15-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
16074 // CHECK15-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
16075 // CHECK15-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
16076 // CHECK15-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
16077 // CHECK15-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
16078 // CHECK15-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
16079 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16080 // CHECK15:       omp.inner.for.inc:
16081 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16082 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16083 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
16084 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
16085 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16086 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16087 // CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
16088 // CHECK15-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
16089 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16090 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16091 // CHECK15-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
16092 // CHECK15-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
16093 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16094 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
16095 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
16096 // CHECK15:       cond.true5:
16097 // CHECK15-NEXT:    br label [[COND_END7:%.*]]
16098 // CHECK15:       cond.false6:
16099 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16100 // CHECK15-NEXT:    br label [[COND_END7]]
16101 // CHECK15:       cond.end7:
16102 // CHECK15-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
16103 // CHECK15-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
16104 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16105 // CHECK15-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
16106 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16107 // CHECK15:       omp.inner.for.end:
16108 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16109 // CHECK15:       omp.loop.exit:
16110 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
16111 // CHECK15-NEXT:    ret void
16112 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__5
16113 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
16114 // CHECK15-NEXT:  entry:
16115 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16116 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16117 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16118 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16119 // CHECK15-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
16120 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16121 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16122 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16123 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16124 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16125 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16126 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16127 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16128 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16129 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16130 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16131 // CHECK15-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
16132 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
16133 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16134 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16135 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16136 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16137 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
16138 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
16139 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16140 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16141 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16142 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
16143 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16144 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16145 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16146 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16147 // CHECK15:       omp.inner.for.cond:
16148 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16149 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16150 // CHECK15-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
16151 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16152 // CHECK15:       omp.inner.for.body:
16153 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16154 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
16155 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16156 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
16157 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
16158 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
16159 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
16160 // CHECK15-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
16161 // CHECK15-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
16162 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16163 // CHECK15:       omp.body.continue:
16164 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16165 // CHECK15:       omp.inner.for.inc:
16166 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16167 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16168 // CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
16169 // CHECK15-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
16170 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16171 // CHECK15:       omp.inner.for.end:
16172 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16173 // CHECK15:       omp.loop.exit:
16174 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
16175 // CHECK15-NEXT:    ret void
16176 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
16177 // CHECK15-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
16178 // CHECK15-NEXT:  entry:
16179 // CHECK15-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
16180 // CHECK15-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
16181 // CHECK15-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
16182 // CHECK15-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
16183 // CHECK15-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
16184 // CHECK15-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
16185 // CHECK15-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
16186 // CHECK15-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
16187 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
16188 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16189 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
16190 // CHECK15-NEXT:    br label [[DOTEXECUTE:%.*]]
16191 // CHECK15:       .execute:
16192 // CHECK15-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
16193 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
16194 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
16195 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
16196 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
16197 // CHECK15-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
16198 // CHECK15-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
16199 // CHECK15:       .omp.deinit:
16200 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
16201 // CHECK15-NEXT:    br label [[DOTEXIT:%.*]]
16202 // CHECK15:       .exit:
16203 // CHECK15-NEXT:    ret void
16204 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__6
16205 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
16206 // CHECK15-NEXT:  entry:
16207 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16208 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16209 // CHECK15-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
16210 // CHECK15-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
16211 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16212 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16213 // CHECK15-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
16214 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16215 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16216 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16217 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16218 // CHECK15-NEXT:    [[K:%.*]] = alloca i32, align 4
16219 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16220 // CHECK15-NEXT:    [[J:%.*]] = alloca i32, align 4
16221 // CHECK15-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
16222 // CHECK15-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
16223 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16224 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16225 // CHECK15-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
16226 // CHECK15-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
16227 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
16228 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16229 // CHECK15-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
16230 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16231 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16232 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16233 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16234 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
16235 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
16236 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16237 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
16238 // CHECK15-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16239 // CHECK15:       cond.true:
16240 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16241 // CHECK15:       cond.false:
16242 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16243 // CHECK15-NEXT:    br label [[COND_END]]
16244 // CHECK15:       cond.end:
16245 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16246 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16247 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16248 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16249 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16250 // CHECK15:       omp.inner.for.cond:
16251 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16252 // CHECK15-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
16253 // CHECK15-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16254 // CHECK15:       omp.inner.for.body:
16255 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16256 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16257 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
16258 // CHECK15-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
16259 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
16260 // CHECK15-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
16261 // CHECK15-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
16262 // CHECK15-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
16263 // CHECK15-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
16264 // CHECK15-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
16265 // CHECK15-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
16266 // CHECK15-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
16267 // CHECK15-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
16268 // CHECK15-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
16269 // CHECK15-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
16270 // CHECK15-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
16271 // CHECK15-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
16272 // CHECK15-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
16273 // CHECK15-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
16274 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16275 // CHECK15:       omp.inner.for.inc:
16276 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16277 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16278 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
16279 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
16280 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16281 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16282 // CHECK15-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
16283 // CHECK15-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
16284 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16285 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16286 // CHECK15-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
16287 // CHECK15-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
16288 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16289 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
16290 // CHECK15-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
16291 // CHECK15:       cond.true6:
16292 // CHECK15-NEXT:    br label [[COND_END8:%.*]]
16293 // CHECK15:       cond.false7:
16294 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16295 // CHECK15-NEXT:    br label [[COND_END8]]
16296 // CHECK15:       cond.end8:
16297 // CHECK15-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
16298 // CHECK15-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
16299 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16300 // CHECK15-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
16301 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16302 // CHECK15:       omp.inner.for.end:
16303 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16304 // CHECK15:       omp.loop.exit:
16305 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
16306 // CHECK15-NEXT:    ret void
16307 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__7
16308 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
16309 // CHECK15-NEXT:  entry:
16310 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16311 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16312 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16313 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16314 // CHECK15-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
16315 // CHECK15-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
16316 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16317 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16318 // CHECK15-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
16319 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16320 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16321 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16322 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16323 // CHECK15-NEXT:    [[K:%.*]] = alloca i32, align 4
16324 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16325 // CHECK15-NEXT:    [[J:%.*]] = alloca i32, align 4
16326 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16327 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16328 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16329 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16330 // CHECK15-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
16331 // CHECK15-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
16332 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
16333 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16334 // CHECK15-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
16335 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16336 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16337 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
16338 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
16339 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16340 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16341 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16342 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
16343 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16344 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16345 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
16346 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16347 // CHECK15:       omp.inner.for.cond:
16348 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16349 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16350 // CHECK15-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
16351 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16352 // CHECK15:       omp.inner.for.body:
16353 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16354 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
16355 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
16356 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16357 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
16358 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16359 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16360 // CHECK15-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
16361 // CHECK15-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
16362 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
16363 // CHECK15-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
16364 // CHECK15-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
16365 // CHECK15-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
16366 // CHECK15-NEXT:    store i32 10, i32* [[K]], align 4
16367 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
16368 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
16369 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
16370 // CHECK15-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
16371 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
16372 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
16373 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
16374 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
16375 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
16376 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
16377 // CHECK15-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
16378 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
16379 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16380 // CHECK15:       omp.body.continue:
16381 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16382 // CHECK15:       omp.inner.for.inc:
16383 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16384 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16385 // CHECK15-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
16386 // CHECK15-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
16387 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16388 // CHECK15:       omp.inner.for.end:
16389 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16390 // CHECK15:       omp.loop.exit:
16391 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
16392 // CHECK15-NEXT:    ret void
16393 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
16394 // CHECK15-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
16395 // CHECK15-NEXT:  entry:
16396 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16397 // CHECK15-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
16398 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
16399 // CHECK15-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
16400 // CHECK15-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
16401 // CHECK15-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
16402 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16403 // CHECK15-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
16404 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
16405 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16406 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
16407 // CHECK15-NEXT:    br label [[DOTEXECUTE:%.*]]
16408 // CHECK15:       .execute:
16409 // CHECK15-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
16410 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16411 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
16412 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
16413 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
16414 // CHECK15-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
16415 // CHECK15-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
16416 // CHECK15:       .omp.deinit:
16417 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
16418 // CHECK15-NEXT:    br label [[DOTEXIT:%.*]]
16419 // CHECK15:       .exit:
16420 // CHECK15-NEXT:    ret void
16421 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__8
16422 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
16423 // CHECK15-NEXT:  entry:
16424 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16425 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16426 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16427 // CHECK15-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
16428 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
16429 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16430 // CHECK15-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
16431 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16432 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
16433 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
16434 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16435 // CHECK15-NEXT:    [[J:%.*]] = alloca i32, align 4
16436 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
16437 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
16438 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
16439 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16440 // CHECK15-NEXT:    [[I9:%.*]] = alloca i32, align 4
16441 // CHECK15-NEXT:    [[J10:%.*]] = alloca i32, align 4
16442 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
16443 // CHECK15-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
16444 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16445 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16446 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16447 // CHECK15-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
16448 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
16449 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16450 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
16451 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16452 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
16453 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16454 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
16455 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16456 // CHECK15-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
16457 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16458 // CHECK15-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
16459 // CHECK15-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
16460 // CHECK15-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
16461 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
16462 // CHECK15-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
16463 // CHECK15-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
16464 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
16465 // CHECK15-NEXT:    store i32 0, i32* [[J]], align 4
16466 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16467 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
16468 // CHECK15-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
16469 // CHECK15:       land.lhs.true:
16470 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16471 // CHECK15-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
16472 // CHECK15-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
16473 // CHECK15:       omp.precond.then:
16474 // CHECK15-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
16475 // CHECK15-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
16476 // CHECK15-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
16477 // CHECK15-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
16478 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16479 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16480 // CHECK15-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
16481 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16482 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
16483 // CHECK15-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
16484 // CHECK15-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
16485 // CHECK15-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
16486 // CHECK15-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
16487 // CHECK15-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16488 // CHECK15:       cond.true:
16489 // CHECK15-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
16490 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16491 // CHECK15:       cond.false:
16492 // CHECK15-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
16493 // CHECK15-NEXT:    br label [[COND_END]]
16494 // CHECK15:       cond.end:
16495 // CHECK15-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
16496 // CHECK15-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
16497 // CHECK15-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
16498 // CHECK15-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
16499 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16500 // CHECK15:       omp.inner.for.cond:
16501 // CHECK15-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
16502 // CHECK15-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
16503 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
16504 // CHECK15-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
16505 // CHECK15-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16506 // CHECK15:       omp.inner.for.body:
16507 // CHECK15-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
16508 // CHECK15-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
16509 // CHECK15-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
16510 // CHECK15-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
16511 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
16512 // CHECK15-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
16513 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
16514 // CHECK15-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
16515 // CHECK15-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
16516 // CHECK15-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
16517 // CHECK15-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
16518 // CHECK15-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
16519 // CHECK15-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
16520 // CHECK15-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
16521 // CHECK15-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
16522 // CHECK15-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
16523 // CHECK15-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
16524 // CHECK15-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
16525 // CHECK15-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
16526 // CHECK15-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16527 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
16528 // CHECK15-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
16529 // CHECK15-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
16530 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16531 // CHECK15:       omp.inner.for.inc:
16532 // CHECK15-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
16533 // CHECK15-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
16534 // CHECK15-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
16535 // CHECK15-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
16536 // CHECK15-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
16537 // CHECK15-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
16538 // CHECK15-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
16539 // CHECK15-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
16540 // CHECK15-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
16541 // CHECK15-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
16542 // CHECK15-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
16543 // CHECK15-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
16544 // CHECK15-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
16545 // CHECK15-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
16546 // CHECK15-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
16547 // CHECK15-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
16548 // CHECK15:       cond.true18:
16549 // CHECK15-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
16550 // CHECK15-NEXT:    br label [[COND_END20:%.*]]
16551 // CHECK15:       cond.false19:
16552 // CHECK15-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
16553 // CHECK15-NEXT:    br label [[COND_END20]]
16554 // CHECK15:       cond.end20:
16555 // CHECK15-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
16556 // CHECK15-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
16557 // CHECK15-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
16558 // CHECK15-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
16559 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16560 // CHECK15:       omp.inner.for.end:
16561 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16562 // CHECK15:       omp.loop.exit:
16563 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16564 // CHECK15-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
16565 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
16566 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
16567 // CHECK15:       omp.precond.end:
16568 // CHECK15-NEXT:    ret void
16569 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__9
16570 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
16571 // CHECK15-NEXT:  entry:
16572 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16573 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16574 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16575 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16576 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16577 // CHECK15-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
16578 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
16579 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16580 // CHECK15-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
16581 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16582 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
16583 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
16584 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16585 // CHECK15-NEXT:    [[J:%.*]] = alloca i32, align 4
16586 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
16587 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
16588 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
16589 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16590 // CHECK15-NEXT:    [[I11:%.*]] = alloca i32, align 4
16591 // CHECK15-NEXT:    [[J12:%.*]] = alloca i32, align 4
16592 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16593 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16594 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16595 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16596 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16597 // CHECK15-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
16598 // CHECK15-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
16599 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16600 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
16601 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16602 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
16603 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16604 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
16605 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16606 // CHECK15-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
16607 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16608 // CHECK15-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
16609 // CHECK15-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
16610 // CHECK15-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
16611 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
16612 // CHECK15-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
16613 // CHECK15-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
16614 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
16615 // CHECK15-NEXT:    store i32 0, i32* [[J]], align 4
16616 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16617 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
16618 // CHECK15-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
16619 // CHECK15:       land.lhs.true:
16620 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16621 // CHECK15-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
16622 // CHECK15-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
16623 // CHECK15:       omp.precond.then:
16624 // CHECK15-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
16625 // CHECK15-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
16626 // CHECK15-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
16627 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16628 // CHECK15-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
16629 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16630 // CHECK15-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
16631 // CHECK15-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
16632 // CHECK15-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
16633 // CHECK15-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
16634 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16635 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16636 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16637 // CHECK15-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
16638 // CHECK15-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
16639 // CHECK15-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
16640 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16641 // CHECK15:       omp.inner.for.cond:
16642 // CHECK15-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
16643 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16644 // CHECK15-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
16645 // CHECK15-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
16646 // CHECK15-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16647 // CHECK15:       omp.inner.for.body:
16648 // CHECK15-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
16649 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16650 // CHECK15-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
16651 // CHECK15-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
16652 // CHECK15-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
16653 // CHECK15-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
16654 // CHECK15-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
16655 // CHECK15-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
16656 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
16657 // CHECK15-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
16658 // CHECK15-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
16659 // CHECK15-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
16660 // CHECK15-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
16661 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16662 // CHECK15-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
16663 // CHECK15-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
16664 // CHECK15-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
16665 // CHECK15-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
16666 // CHECK15-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
16667 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16668 // CHECK15-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
16669 // CHECK15-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
16670 // CHECK15-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
16671 // CHECK15-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
16672 // CHECK15-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
16673 // CHECK15-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
16674 // CHECK15-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
16675 // CHECK15-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
16676 // CHECK15-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
16677 // CHECK15-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
16678 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
16679 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
16680 // CHECK15-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
16681 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
16682 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
16683 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
16684 // CHECK15-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
16685 // CHECK15-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
16686 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16687 // CHECK15:       omp.body.continue:
16688 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16689 // CHECK15:       omp.inner.for.inc:
16690 // CHECK15-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
16691 // CHECK15-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
16692 // CHECK15-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
16693 // CHECK15-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
16694 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16695 // CHECK15:       omp.inner.for.end:
16696 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16697 // CHECK15:       omp.loop.exit:
16698 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16699 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
16700 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
16701 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
16702 // CHECK15:       omp.precond.end:
16703 // CHECK15-NEXT:    ret void
16704 // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
16705 // CHECK15-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
16706 // CHECK15-NEXT:  entry:
16707 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16708 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
16709 // CHECK15-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
16710 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
16711 // CHECK15-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
16712 // CHECK15-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
16713 // CHECK15-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
16714 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16715 // CHECK15-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
16716 // CHECK15-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
16717 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
16718 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16719 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
16720 // CHECK15-NEXT:    br label [[DOTEXECUTE:%.*]]
16721 // CHECK15:       .execute:
16722 // CHECK15-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
16723 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16724 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
16725 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
16726 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
16727 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
16728 // CHECK15-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
16729 // CHECK15-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
16730 // CHECK15:       .omp.deinit:
16731 // CHECK15-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
16732 // CHECK15-NEXT:    br label [[DOTEXIT:%.*]]
16733 // CHECK15:       .exit:
16734 // CHECK15-NEXT:    ret void
16735 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__10
16736 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
16737 // CHECK15-NEXT:  entry:
16738 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16739 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16740 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16741 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
16742 // CHECK15-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
16743 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16744 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16745 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16746 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16747 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16748 // CHECK15-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16749 // CHECK15-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16750 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16751 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16752 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
16753 // CHECK15-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
16754 // CHECK15-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
16755 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16756 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16757 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16758 // CHECK15-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
16759 // CHECK15-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
16760 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
16761 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16762 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
16763 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16764 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
16765 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16766 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16767 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16768 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
16769 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16770 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
16771 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16772 // CHECK15:       omp.precond.then:
16773 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16774 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16775 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
16776 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16777 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16778 // CHECK15-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16779 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16780 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
16781 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
16782 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16783 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16784 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
16785 // CHECK15-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16786 // CHECK15:       cond.true:
16787 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16788 // CHECK15-NEXT:    br label [[COND_END:%.*]]
16789 // CHECK15:       cond.false:
16790 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16791 // CHECK15-NEXT:    br label [[COND_END]]
16792 // CHECK15:       cond.end:
16793 // CHECK15-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
16794 // CHECK15-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16795 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16796 // CHECK15-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
16797 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16798 // CHECK15:       omp.inner.for.cond:
16799 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16800 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16801 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
16802 // CHECK15-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
16803 // CHECK15-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16804 // CHECK15:       omp.inner.for.body:
16805 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16806 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16807 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
16808 // CHECK15-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
16809 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
16810 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
16811 // CHECK15-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
16812 // CHECK15-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
16813 // CHECK15-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
16814 // CHECK15-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
16815 // CHECK15-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
16816 // CHECK15-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
16817 // CHECK15-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
16818 // CHECK15-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
16819 // CHECK15-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
16820 // CHECK15-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
16821 // CHECK15-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
16822 // CHECK15-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
16823 // CHECK15-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
16824 // CHECK15-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
16825 // CHECK15-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
16826 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16827 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
16828 // CHECK15-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
16829 // CHECK15-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
16830 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16831 // CHECK15:       omp.inner.for.inc:
16832 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16833 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16834 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
16835 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
16836 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16837 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16838 // CHECK15-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
16839 // CHECK15-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
16840 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16841 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16842 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
16843 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
16844 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16845 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16846 // CHECK15-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
16847 // CHECK15-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
16848 // CHECK15:       cond.true10:
16849 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16850 // CHECK15-NEXT:    br label [[COND_END12:%.*]]
16851 // CHECK15:       cond.false11:
16852 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16853 // CHECK15-NEXT:    br label [[COND_END12]]
16854 // CHECK15:       cond.end12:
16855 // CHECK15-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
16856 // CHECK15-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
16857 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16858 // CHECK15-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
16859 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16860 // CHECK15:       omp.inner.for.end:
16861 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16862 // CHECK15:       omp.loop.exit:
16863 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16864 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
16865 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
16866 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
16867 // CHECK15:       omp.precond.end:
16868 // CHECK15-NEXT:    ret void
16869 // CHECK15-LABEL: define {{[^@]+}}@__omp_outlined__11
16870 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
16871 // CHECK15-NEXT:  entry:
16872 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16873 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16874 // CHECK15-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16875 // CHECK15-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16876 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16877 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
16878 // CHECK15-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
16879 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16880 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16881 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16882 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16883 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16884 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16885 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16886 // CHECK15-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16887 // CHECK15-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16888 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
16889 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
16890 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
16891 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16892 // CHECK15-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16893 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16894 // CHECK15-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
16895 // CHECK15-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
16896 // CHECK15-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
16897 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16898 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
16899 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16900 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
16901 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16902 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16903 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16904 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
16905 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16906 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
16907 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16908 // CHECK15:       omp.precond.then:
16909 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16910 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16911 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
16912 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
16913 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16914 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
16915 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
16916 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16917 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16918 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16919 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
16920 // CHECK15-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16921 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16922 // CHECK15-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
16923 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16924 // CHECK15:       omp.inner.for.cond:
16925 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16926 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
16927 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
16928 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16929 // CHECK15:       omp.inner.for.body:
16930 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16931 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
16932 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16933 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
16934 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
16935 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
16936 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
16937 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
16938 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
16939 // CHECK15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
16940 // CHECK15-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
16941 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16942 // CHECK15:       omp.body.continue:
16943 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16944 // CHECK15:       omp.inner.for.inc:
16945 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16946 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
16947 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
16948 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
16949 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]]
16950 // CHECK15:       omp.inner.for.end:
16951 // CHECK15-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16952 // CHECK15:       omp.loop.exit:
16953 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
16954 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
16955 // CHECK15-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
16956 // CHECK15-NEXT:    br label [[OMP_PRECOND_END]]
16957 // CHECK15:       omp.precond.end:
16958 // CHECK15-NEXT:    ret void
16959 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l43
16960 // CHECK16-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
16961 // CHECK16-NEXT:  entry:
16962 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16963 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
16964 // CHECK16-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
16965 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
16966 // CHECK16-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
16967 // CHECK16-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
16968 // CHECK16-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
16969 // CHECK16-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
16970 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16971 // CHECK16-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
16972 // CHECK16-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
16973 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
16974 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
16975 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
16976 // CHECK16-NEXT:    br label [[DOTEXECUTE:%.*]]
16977 // CHECK16:       .execute:
16978 // CHECK16-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
16979 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16980 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
16981 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
16982 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[L_ADDR]], align 4
16983 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[L_CASTED]], align 4
16984 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_CASTED]], align 4
16985 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
16986 // CHECK16-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
16987 // CHECK16-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
16988 // CHECK16:       .omp.deinit:
16989 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
16990 // CHECK16-NEXT:    br label [[DOTEXIT:%.*]]
16991 // CHECK16:       .exit:
16992 // CHECK16-NEXT:    ret void
16993 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__
16994 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
16995 // CHECK16-NEXT:  entry:
16996 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
16997 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
16998 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16999 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
17000 // CHECK16-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
17001 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17002 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17003 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17004 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17005 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17006 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17007 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17008 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17009 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17010 // CHECK16-NEXT:    [[I4:%.*]] = alloca i32, align 4
17011 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17012 // CHECK16-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
17013 // CHECK16-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
17014 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17015 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17016 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17017 // CHECK16-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
17018 // CHECK16-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
17019 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
17020 // CHECK16-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i32 4, i16 1)
17021 // CHECK16-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
17022 // CHECK16-NEXT:    [[L1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
17023 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
17024 // CHECK16-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
17025 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17026 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
17027 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17028 // CHECK16-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17029 // CHECK16-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17030 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17031 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17032 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
17033 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17034 // CHECK16:       omp.precond.then:
17035 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17036 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17037 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_COMB_UB]], align 4
17038 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17039 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17040 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17041 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
17042 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
17043 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17044 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17045 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
17046 // CHECK16-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17047 // CHECK16:       cond.true:
17048 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17049 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17050 // CHECK16:       cond.false:
17051 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17052 // CHECK16-NEXT:    br label [[COND_END]]
17053 // CHECK16:       cond.end:
17054 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
17055 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17056 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17057 // CHECK16-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
17058 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17059 // CHECK16:       omp.inner.for.cond:
17060 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17061 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17062 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
17063 // CHECK16-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
17064 // CHECK16-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17065 // CHECK16:       omp.inner.for.body:
17066 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17067 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17068 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
17069 // CHECK16-NEXT:    store i32 [[TMP18]], i32* [[N_CASTED]], align 4
17070 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
17071 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[L_ADDR]], align 4
17072 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[L_CASTED]], align 4
17073 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[L_CASTED]], align 4
17074 // CHECK16-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
17075 // CHECK16-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP16]] to i8*
17076 // CHECK16-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
17077 // CHECK16-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
17078 // CHECK16-NEXT:    [[TMP25:%.*]] = inttoptr i32 [[TMP17]] to i8*
17079 // CHECK16-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
17080 // CHECK16-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
17081 // CHECK16-NEXT:    [[TMP27:%.*]] = inttoptr i32 [[TMP19]] to i8*
17082 // CHECK16-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 4
17083 // CHECK16-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
17084 // CHECK16-NEXT:    [[TMP29:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
17085 // CHECK16-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 4
17086 // CHECK16-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
17087 // CHECK16-NEXT:    [[TMP31:%.*]] = inttoptr i32 [[TMP21]] to i8*
17088 // CHECK16-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 4
17089 // CHECK16-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17090 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
17091 // CHECK16-NEXT:    [[TMP34:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
17092 // CHECK16-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP34]], i32 5)
17093 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17094 // CHECK16:       omp.inner.for.inc:
17095 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17096 // CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17097 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
17098 // CHECK16-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
17099 // CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17100 // CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17101 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
17102 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
17103 // CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17104 // CHECK16-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17105 // CHECK16-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
17106 // CHECK16-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
17107 // CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17108 // CHECK16-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17109 // CHECK16-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP41]], [[TMP42]]
17110 // CHECK16-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
17111 // CHECK16:       cond.true11:
17112 // CHECK16-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17113 // CHECK16-NEXT:    br label [[COND_END13:%.*]]
17114 // CHECK16:       cond.false12:
17115 // CHECK16-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17116 // CHECK16-NEXT:    br label [[COND_END13]]
17117 // CHECK16:       cond.end13:
17118 // CHECK16-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP43]], [[COND_TRUE11]] ], [ [[TMP44]], [[COND_FALSE12]] ]
17119 // CHECK16-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
17120 // CHECK16-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17121 // CHECK16-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV]], align 4
17122 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17123 // CHECK16:       omp.inner.for.end:
17124 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17125 // CHECK16:       omp.loop.exit:
17126 // CHECK16-NEXT:    [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17127 // CHECK16-NEXT:    [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
17128 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP47]])
17129 // CHECK16-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17130 // CHECK16-NEXT:    [[TMP49:%.*]] = icmp ne i32 [[TMP48]], 0
17131 // CHECK16-NEXT:    br i1 [[TMP49]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
17132 // CHECK16:       .omp.lastprivate.then:
17133 // CHECK16-NEXT:    [[TMP50:%.*]] = load i32, i32* [[L_ADDR]], align 4
17134 // CHECK16-NEXT:    store i32 [[TMP50]], i32* [[L_ADDR]], align 4
17135 // CHECK16-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
17136 // CHECK16:       .omp.lastprivate.done:
17137 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17138 // CHECK16:       omp.precond.end:
17139 // CHECK16-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
17140 // CHECK16-NEXT:    ret void
17141 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__1
17142 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
17143 // CHECK16-NEXT:  entry:
17144 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17145 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17146 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
17147 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
17148 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17149 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
17150 // CHECK16-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
17151 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17152 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17153 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17154 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17155 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17156 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17157 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17158 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17159 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17160 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17161 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17162 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17163 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17164 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17165 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17166 // CHECK16-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
17167 // CHECK16-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
17168 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
17169 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17170 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
17171 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17172 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
17173 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17174 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17175 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17176 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17177 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17178 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
17179 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17180 // CHECK16:       omp.precond.then:
17181 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17182 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17183 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
17184 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17185 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17186 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
17187 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
17188 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17189 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17190 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17191 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
17192 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
17193 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
17194 // CHECK16:       omp.dispatch.cond:
17195 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17196 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17197 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
17198 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17199 // CHECK16:       cond.true:
17200 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17201 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17202 // CHECK16:       cond.false:
17203 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17204 // CHECK16-NEXT:    br label [[COND_END]]
17205 // CHECK16:       cond.end:
17206 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
17207 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17208 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17209 // CHECK16-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
17210 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17211 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17212 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
17213 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
17214 // CHECK16:       omp.dispatch.body:
17215 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17216 // CHECK16:       omp.inner.for.cond:
17217 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17218 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17219 // CHECK16-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
17220 // CHECK16-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17221 // CHECK16:       omp.inner.for.body:
17222 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17223 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
17224 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17225 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
17226 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
17227 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
17228 // CHECK16-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
17229 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
17230 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
17231 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17232 // CHECK16:       omp.body.continue:
17233 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17234 // CHECK16:       omp.inner.for.inc:
17235 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17236 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
17237 // CHECK16-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
17238 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17239 // CHECK16:       omp.inner.for.end:
17240 // CHECK16-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
17241 // CHECK16:       omp.dispatch.inc:
17242 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17243 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17244 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
17245 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
17246 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17247 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17248 // CHECK16-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
17249 // CHECK16-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
17250 // CHECK16-NEXT:    br label [[OMP_DISPATCH_COND]]
17251 // CHECK16:       omp.dispatch.end:
17252 // CHECK16-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17253 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
17254 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
17255 // CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17256 // CHECK16-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
17257 // CHECK16-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
17258 // CHECK16:       .omp.lastprivate.then:
17259 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
17260 // CHECK16-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
17261 // CHECK16-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
17262 // CHECK16:       .omp.lastprivate.done:
17263 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17264 // CHECK16:       omp.precond.end:
17265 // CHECK16-NEXT:    ret void
17266 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l49
17267 // CHECK16-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
17268 // CHECK16-NEXT:  entry:
17269 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17270 // CHECK16-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
17271 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17272 // CHECK16-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
17273 // CHECK16-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
17274 // CHECK16-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
17275 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17276 // CHECK16-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
17277 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
17278 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17279 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
17280 // CHECK16-NEXT:    br label [[DOTEXECUTE:%.*]]
17281 // CHECK16:       .execute:
17282 // CHECK16-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
17283 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17284 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
17285 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
17286 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
17287 // CHECK16-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i16]* [[TMP0]]) #[[ATTR3]]
17288 // CHECK16-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
17289 // CHECK16:       .omp.deinit:
17290 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
17291 // CHECK16-NEXT:    br label [[DOTEXIT:%.*]]
17292 // CHECK16:       .exit:
17293 // CHECK16-NEXT:    ret void
17294 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__2
17295 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
17296 // CHECK16-NEXT:  entry:
17297 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17298 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17299 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17300 // CHECK16-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
17301 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17302 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17303 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17304 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17305 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17306 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17307 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17308 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17309 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17310 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17311 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17312 // CHECK16-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
17313 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17314 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17315 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17316 // CHECK16-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
17317 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
17318 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17319 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
17320 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17321 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
17322 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17323 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17324 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17325 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17326 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17327 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
17328 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17329 // CHECK16:       omp.precond.then:
17330 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17331 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17332 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
17333 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17334 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17335 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17336 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17337 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
17338 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
17339 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17340 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17341 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
17342 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17343 // CHECK16:       cond.true:
17344 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17345 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17346 // CHECK16:       cond.false:
17347 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17348 // CHECK16-NEXT:    br label [[COND_END]]
17349 // CHECK16:       cond.end:
17350 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
17351 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17352 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17353 // CHECK16-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
17354 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17355 // CHECK16:       omp.inner.for.cond:
17356 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17357 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17358 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
17359 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
17360 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17361 // CHECK16:       omp.inner.for.body:
17362 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17363 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17364 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
17365 // CHECK16-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
17366 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
17367 // CHECK16-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
17368 // CHECK16-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
17369 // CHECK16-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
17370 // CHECK16-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
17371 // CHECK16-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
17372 // CHECK16-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
17373 // CHECK16-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
17374 // CHECK16-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
17375 // CHECK16-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
17376 // CHECK16-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
17377 // CHECK16-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
17378 // CHECK16-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
17379 // CHECK16-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17380 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
17381 // CHECK16-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
17382 // CHECK16-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
17383 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17384 // CHECK16:       omp.inner.for.inc:
17385 // CHECK16-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17386 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17387 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
17388 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
17389 // CHECK16-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17390 // CHECK16-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17391 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
17392 // CHECK16-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
17393 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17394 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17395 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
17396 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
17397 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17398 // CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17399 // CHECK16-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
17400 // CHECK16-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
17401 // CHECK16:       cond.true10:
17402 // CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17403 // CHECK16-NEXT:    br label [[COND_END12:%.*]]
17404 // CHECK16:       cond.false11:
17405 // CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17406 // CHECK16-NEXT:    br label [[COND_END12]]
17407 // CHECK16:       cond.end12:
17408 // CHECK16-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
17409 // CHECK16-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
17410 // CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17411 // CHECK16-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
17412 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17413 // CHECK16:       omp.inner.for.end:
17414 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17415 // CHECK16:       omp.loop.exit:
17416 // CHECK16-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17417 // CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
17418 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
17419 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17420 // CHECK16:       omp.precond.end:
17421 // CHECK16-NEXT:    ret void
17422 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__3
17423 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
17424 // CHECK16-NEXT:  entry:
17425 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17426 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17427 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
17428 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
17429 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17430 // CHECK16-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
17431 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17432 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17433 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17434 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17435 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17436 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17437 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17438 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17439 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17440 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
17441 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17442 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17443 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17444 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17445 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17446 // CHECK16-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
17447 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
17448 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17449 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
17450 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17451 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
17452 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17453 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17454 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17455 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17456 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17457 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
17458 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17459 // CHECK16:       omp.precond.then:
17460 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17461 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17462 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
17463 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17464 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17465 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
17466 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
17467 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17468 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17469 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17470 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
17471 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17472 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17473 // CHECK16-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
17474 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17475 // CHECK16:       omp.inner.for.cond:
17476 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17477 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17478 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
17479 // CHECK16-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17480 // CHECK16:       omp.inner.for.body:
17481 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17482 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
17483 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17484 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
17485 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
17486 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
17487 // CHECK16-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
17488 // CHECK16-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
17489 // CHECK16-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
17490 // CHECK16-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
17491 // CHECK16-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
17492 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17493 // CHECK16:       omp.body.continue:
17494 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17495 // CHECK16:       omp.inner.for.inc:
17496 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17497 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17498 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
17499 // CHECK16-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
17500 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17501 // CHECK16:       omp.inner.for.end:
17502 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17503 // CHECK16:       omp.loop.exit:
17504 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17505 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
17506 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]])
17507 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
17508 // CHECK16:       omp.precond.end:
17509 // CHECK16-NEXT:    ret void
17510 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l54
17511 // CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
17512 // CHECK16-NEXT:  entry:
17513 // CHECK16-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
17514 // CHECK16-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
17515 // CHECK16-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
17516 // CHECK16-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
17517 // CHECK16-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
17518 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
17519 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17520 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
17521 // CHECK16-NEXT:    br label [[DOTEXECUTE:%.*]]
17522 // CHECK16:       .execute:
17523 // CHECK16-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
17524 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
17525 // CHECK16-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR3]]
17526 // CHECK16-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
17527 // CHECK16:       .omp.deinit:
17528 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
17529 // CHECK16-NEXT:    br label [[DOTEXIT:%.*]]
17530 // CHECK16:       .exit:
17531 // CHECK16-NEXT:    ret void
17532 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__4
17533 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
17534 // CHECK16-NEXT:  entry:
17535 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17536 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17537 // CHECK16-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
17538 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17539 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17540 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17541 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17542 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17543 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17544 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17545 // CHECK16-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
17546 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17547 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17548 // CHECK16-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
17549 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
17550 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17551 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
17552 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17553 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17554 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17555 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17556 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
17557 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
17558 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17559 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
17560 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17561 // CHECK16:       cond.true:
17562 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17563 // CHECK16:       cond.false:
17564 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17565 // CHECK16-NEXT:    br label [[COND_END]]
17566 // CHECK16:       cond.end:
17567 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
17568 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17569 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17570 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
17571 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17572 // CHECK16:       omp.inner.for.cond:
17573 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17574 // CHECK16-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
17575 // CHECK16-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17576 // CHECK16:       omp.inner.for.body:
17577 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17578 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17579 // CHECK16-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
17580 // CHECK16-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
17581 // CHECK16-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
17582 // CHECK16-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
17583 // CHECK16-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
17584 // CHECK16-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
17585 // CHECK16-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
17586 // CHECK16-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
17587 // CHECK16-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
17588 // CHECK16-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
17589 // CHECK16-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
17590 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17591 // CHECK16:       omp.inner.for.inc:
17592 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17593 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17594 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
17595 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
17596 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17597 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17598 // CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
17599 // CHECK16-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
17600 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17601 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17602 // CHECK16-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
17603 // CHECK16-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
17604 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17605 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
17606 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
17607 // CHECK16:       cond.true5:
17608 // CHECK16-NEXT:    br label [[COND_END7:%.*]]
17609 // CHECK16:       cond.false6:
17610 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17611 // CHECK16-NEXT:    br label [[COND_END7]]
17612 // CHECK16:       cond.end7:
17613 // CHECK16-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
17614 // CHECK16-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
17615 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17616 // CHECK16-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
17617 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17618 // CHECK16:       omp.inner.for.end:
17619 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17620 // CHECK16:       omp.loop.exit:
17621 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
17622 // CHECK16-NEXT:    ret void
17623 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__5
17624 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
17625 // CHECK16-NEXT:  entry:
17626 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17627 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17628 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
17629 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
17630 // CHECK16-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
17631 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17632 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17633 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17634 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17635 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17636 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17637 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17638 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17639 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17640 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17641 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17642 // CHECK16-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
17643 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
17644 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17645 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
17646 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17647 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17648 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
17649 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
17650 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17651 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17652 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17653 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
17654 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17655 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17656 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
17657 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17658 // CHECK16:       omp.inner.for.cond:
17659 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17660 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17661 // CHECK16-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
17662 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17663 // CHECK16:       omp.inner.for.body:
17664 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17665 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
17666 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17667 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
17668 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
17669 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
17670 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
17671 // CHECK16-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
17672 // CHECK16-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
17673 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17674 // CHECK16:       omp.body.continue:
17675 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17676 // CHECK16:       omp.inner.for.inc:
17677 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17678 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17679 // CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
17680 // CHECK16-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
17681 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17682 // CHECK16:       omp.inner.for.end:
17683 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17684 // CHECK16:       omp.loop.exit:
17685 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
17686 // CHECK16-NEXT:    ret void
17687 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
17688 // CHECK16-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
17689 // CHECK16-NEXT:  entry:
17690 // CHECK16-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
17691 // CHECK16-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
17692 // CHECK16-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
17693 // CHECK16-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
17694 // CHECK16-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
17695 // CHECK16-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
17696 // CHECK16-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
17697 // CHECK16-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
17698 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
17699 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17700 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
17701 // CHECK16-NEXT:    br label [[DOTEXECUTE:%.*]]
17702 // CHECK16:       .execute:
17703 // CHECK16-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
17704 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[F_ADDR]], align 4
17705 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[F_CASTED]], align 4
17706 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_CASTED]], align 4
17707 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
17708 // CHECK16-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP3]]) #[[ATTR3]]
17709 // CHECK16-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
17710 // CHECK16:       .omp.deinit:
17711 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
17712 // CHECK16-NEXT:    br label [[DOTEXIT:%.*]]
17713 // CHECK16:       .exit:
17714 // CHECK16-NEXT:    ret void
17715 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__6
17716 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
17717 // CHECK16-NEXT:  entry:
17718 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17719 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17720 // CHECK16-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
17721 // CHECK16-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
17722 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17723 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17724 // CHECK16-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
17725 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17726 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17727 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17728 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17729 // CHECK16-NEXT:    [[K:%.*]] = alloca i32, align 4
17730 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17731 // CHECK16-NEXT:    [[J:%.*]] = alloca i32, align 4
17732 // CHECK16-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
17733 // CHECK16-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
17734 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17735 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17736 // CHECK16-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
17737 // CHECK16-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
17738 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
17739 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17740 // CHECK16-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
17741 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17742 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17743 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17744 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17745 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
17746 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
17747 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17748 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
17749 // CHECK16-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17750 // CHECK16:       cond.true:
17751 // CHECK16-NEXT:    br label [[COND_END:%.*]]
17752 // CHECK16:       cond.false:
17753 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17754 // CHECK16-NEXT:    br label [[COND_END]]
17755 // CHECK16:       cond.end:
17756 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
17757 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17758 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17759 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
17760 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17761 // CHECK16:       omp.inner.for.cond:
17762 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17763 // CHECK16-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
17764 // CHECK16-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17765 // CHECK16:       omp.inner.for.body:
17766 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17767 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17768 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
17769 // CHECK16-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
17770 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
17771 // CHECK16-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
17772 // CHECK16-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
17773 // CHECK16-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
17774 // CHECK16-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
17775 // CHECK16-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
17776 // CHECK16-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
17777 // CHECK16-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
17778 // CHECK16-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
17779 // CHECK16-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
17780 // CHECK16-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
17781 // CHECK16-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
17782 // CHECK16-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
17783 // CHECK16-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
17784 // CHECK16-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
17785 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17786 // CHECK16:       omp.inner.for.inc:
17787 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17788 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17789 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
17790 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
17791 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17792 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17793 // CHECK16-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
17794 // CHECK16-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
17795 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17796 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17797 // CHECK16-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
17798 // CHECK16-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
17799 // CHECK16-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17800 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
17801 // CHECK16-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
17802 // CHECK16:       cond.true6:
17803 // CHECK16-NEXT:    br label [[COND_END8:%.*]]
17804 // CHECK16:       cond.false7:
17805 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17806 // CHECK16-NEXT:    br label [[COND_END8]]
17807 // CHECK16:       cond.end8:
17808 // CHECK16-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
17809 // CHECK16-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
17810 // CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17811 // CHECK16-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
17812 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17813 // CHECK16:       omp.inner.for.end:
17814 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17815 // CHECK16:       omp.loop.exit:
17816 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
17817 // CHECK16-NEXT:    ret void
17818 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__7
17819 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
17820 // CHECK16-NEXT:  entry:
17821 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17822 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17823 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
17824 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
17825 // CHECK16-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
17826 // CHECK16-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
17827 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17828 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17829 // CHECK16-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
17830 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17831 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17832 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17833 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17834 // CHECK16-NEXT:    [[K:%.*]] = alloca i32, align 4
17835 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17836 // CHECK16-NEXT:    [[J:%.*]] = alloca i32, align 4
17837 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17838 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17839 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17840 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17841 // CHECK16-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
17842 // CHECK16-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
17843 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
17844 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17845 // CHECK16-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
17846 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
17847 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17848 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
17849 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
17850 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17851 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17852 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17853 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
17854 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17855 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17856 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
17857 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17858 // CHECK16:       omp.inner.for.cond:
17859 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17860 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
17861 // CHECK16-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
17862 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17863 // CHECK16:       omp.inner.for.body:
17864 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17865 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
17866 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
17867 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17868 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
17869 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17870 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17871 // CHECK16-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
17872 // CHECK16-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
17873 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
17874 // CHECK16-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
17875 // CHECK16-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
17876 // CHECK16-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
17877 // CHECK16-NEXT:    store i32 10, i32* [[K]], align 4
17878 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
17879 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
17880 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
17881 // CHECK16-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
17882 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
17883 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
17884 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
17885 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
17886 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
17887 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
17888 // CHECK16-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
17889 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
17890 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17891 // CHECK16:       omp.body.continue:
17892 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17893 // CHECK16:       omp.inner.for.inc:
17894 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17895 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17896 // CHECK16-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
17897 // CHECK16-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
17898 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
17899 // CHECK16:       omp.inner.for.end:
17900 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17901 // CHECK16:       omp.loop.exit:
17902 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
17903 // CHECK16-NEXT:    ret void
17904 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l67
17905 // CHECK16-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
17906 // CHECK16-NEXT:  entry:
17907 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17908 // CHECK16-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
17909 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17910 // CHECK16-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
17911 // CHECK16-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
17912 // CHECK16-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
17913 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17914 // CHECK16-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
17915 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
17916 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17917 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
17918 // CHECK16-NEXT:    br label [[DOTEXECUTE:%.*]]
17919 // CHECK16:       .execute:
17920 // CHECK16-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
17921 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17922 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
17923 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
17924 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
17925 // CHECK16-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR3]]
17926 // CHECK16-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
17927 // CHECK16:       .omp.deinit:
17928 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
17929 // CHECK16-NEXT:    br label [[DOTEXIT:%.*]]
17930 // CHECK16:       .exit:
17931 // CHECK16-NEXT:    ret void
17932 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__8
17933 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
17934 // CHECK16-NEXT:  entry:
17935 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
17936 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
17937 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17938 // CHECK16-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
17939 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
17940 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17941 // CHECK16-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
17942 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17943 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17944 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
17945 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
17946 // CHECK16-NEXT:    [[J:%.*]] = alloca i32, align 4
17947 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
17948 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
17949 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
17950 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17951 // CHECK16-NEXT:    [[I9:%.*]] = alloca i32, align 4
17952 // CHECK16-NEXT:    [[J10:%.*]] = alloca i32, align 4
17953 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
17954 // CHECK16-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
17955 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
17956 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
17957 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17958 // CHECK16-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
17959 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
17960 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17961 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
17962 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17963 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17964 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17965 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
17966 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17967 // CHECK16-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
17968 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17969 // CHECK16-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
17970 // CHECK16-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
17971 // CHECK16-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
17972 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
17973 // CHECK16-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
17974 // CHECK16-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
17975 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
17976 // CHECK16-NEXT:    store i32 0, i32* [[J]], align 4
17977 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17978 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
17979 // CHECK16-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
17980 // CHECK16:       land.lhs.true:
17981 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17982 // CHECK16-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
17983 // CHECK16-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
17984 // CHECK16:       omp.precond.then:
17985 // CHECK16-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
17986 // CHECK16-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
17987 // CHECK16-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
17988 // CHECK16-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
17989 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17990 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
17991 // CHECK16-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
17992 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
17993 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17994 // CHECK16-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
17995 // CHECK16-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
17996 // CHECK16-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
17997 // CHECK16-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
17998 // CHECK16-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17999 // CHECK16:       cond.true:
18000 // CHECK16-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
18001 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18002 // CHECK16:       cond.false:
18003 // CHECK16-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
18004 // CHECK16-NEXT:    br label [[COND_END]]
18005 // CHECK16:       cond.end:
18006 // CHECK16-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
18007 // CHECK16-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
18008 // CHECK16-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
18009 // CHECK16-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
18010 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18011 // CHECK16:       omp.inner.for.cond:
18012 // CHECK16-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
18013 // CHECK16-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
18014 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
18015 // CHECK16-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
18016 // CHECK16-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18017 // CHECK16:       omp.inner.for.body:
18018 // CHECK16-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
18019 // CHECK16-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
18020 // CHECK16-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
18021 // CHECK16-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
18022 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
18023 // CHECK16-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
18024 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
18025 // CHECK16-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
18026 // CHECK16-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
18027 // CHECK16-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
18028 // CHECK16-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
18029 // CHECK16-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
18030 // CHECK16-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
18031 // CHECK16-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
18032 // CHECK16-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
18033 // CHECK16-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
18034 // CHECK16-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
18035 // CHECK16-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
18036 // CHECK16-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
18037 // CHECK16-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18038 // CHECK16-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
18039 // CHECK16-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
18040 // CHECK16-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
18041 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18042 // CHECK16:       omp.inner.for.inc:
18043 // CHECK16-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
18044 // CHECK16-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
18045 // CHECK16-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
18046 // CHECK16-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
18047 // CHECK16-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
18048 // CHECK16-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
18049 // CHECK16-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
18050 // CHECK16-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
18051 // CHECK16-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
18052 // CHECK16-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
18053 // CHECK16-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
18054 // CHECK16-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
18055 // CHECK16-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
18056 // CHECK16-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
18057 // CHECK16-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
18058 // CHECK16-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
18059 // CHECK16:       cond.true18:
18060 // CHECK16-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
18061 // CHECK16-NEXT:    br label [[COND_END20:%.*]]
18062 // CHECK16:       cond.false19:
18063 // CHECK16-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
18064 // CHECK16-NEXT:    br label [[COND_END20]]
18065 // CHECK16:       cond.end20:
18066 // CHECK16-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
18067 // CHECK16-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
18068 // CHECK16-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
18069 // CHECK16-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
18070 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
18071 // CHECK16:       omp.inner.for.end:
18072 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18073 // CHECK16:       omp.loop.exit:
18074 // CHECK16-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18075 // CHECK16-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
18076 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP46]])
18077 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18078 // CHECK16:       omp.precond.end:
18079 // CHECK16-NEXT:    ret void
18080 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__9
18081 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
18082 // CHECK16-NEXT:  entry:
18083 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18084 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18085 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18086 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18087 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18088 // CHECK16-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
18089 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
18090 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18091 // CHECK16-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
18092 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18093 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
18094 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
18095 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18096 // CHECK16-NEXT:    [[J:%.*]] = alloca i32, align 4
18097 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
18098 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
18099 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
18100 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18101 // CHECK16-NEXT:    [[I11:%.*]] = alloca i32, align 4
18102 // CHECK16-NEXT:    [[J12:%.*]] = alloca i32, align 4
18103 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18104 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18105 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18106 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18107 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18108 // CHECK16-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
18109 // CHECK16-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
18110 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
18111 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
18112 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18113 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
18114 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18115 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
18116 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18117 // CHECK16-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
18118 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18119 // CHECK16-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
18120 // CHECK16-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
18121 // CHECK16-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
18122 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
18123 // CHECK16-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
18124 // CHECK16-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
18125 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
18126 // CHECK16-NEXT:    store i32 0, i32* [[J]], align 4
18127 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18128 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
18129 // CHECK16-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
18130 // CHECK16:       land.lhs.true:
18131 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18132 // CHECK16-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
18133 // CHECK16-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
18134 // CHECK16:       omp.precond.then:
18135 // CHECK16-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
18136 // CHECK16-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
18137 // CHECK16-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
18138 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18139 // CHECK16-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
18140 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18141 // CHECK16-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
18142 // CHECK16-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
18143 // CHECK16-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
18144 // CHECK16-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
18145 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18146 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18147 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
18148 // CHECK16-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
18149 // CHECK16-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
18150 // CHECK16-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
18151 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18152 // CHECK16:       omp.inner.for.cond:
18153 // CHECK16-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
18154 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18155 // CHECK16-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
18156 // CHECK16-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
18157 // CHECK16-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18158 // CHECK16:       omp.inner.for.body:
18159 // CHECK16-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
18160 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18161 // CHECK16-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
18162 // CHECK16-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
18163 // CHECK16-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
18164 // CHECK16-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
18165 // CHECK16-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
18166 // CHECK16-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
18167 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
18168 // CHECK16-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
18169 // CHECK16-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
18170 // CHECK16-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
18171 // CHECK16-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
18172 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18173 // CHECK16-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
18174 // CHECK16-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
18175 // CHECK16-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
18176 // CHECK16-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
18177 // CHECK16-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
18178 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18179 // CHECK16-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
18180 // CHECK16-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
18181 // CHECK16-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
18182 // CHECK16-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
18183 // CHECK16-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
18184 // CHECK16-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
18185 // CHECK16-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
18186 // CHECK16-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
18187 // CHECK16-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
18188 // CHECK16-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
18189 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
18190 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
18191 // CHECK16-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
18192 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
18193 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
18194 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
18195 // CHECK16-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
18196 // CHECK16-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
18197 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18198 // CHECK16:       omp.body.continue:
18199 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18200 // CHECK16:       omp.inner.for.inc:
18201 // CHECK16-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
18202 // CHECK16-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
18203 // CHECK16-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
18204 // CHECK16-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
18205 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
18206 // CHECK16:       omp.inner.for.end:
18207 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18208 // CHECK16:       omp.loop.exit:
18209 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18210 // CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
18211 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
18212 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18213 // CHECK16:       omp.precond.end:
18214 // CHECK16-NEXT:    ret void
18215 // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
18216 // CHECK16-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
18217 // CHECK16-NEXT:  entry:
18218 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18219 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
18220 // CHECK16-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
18221 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
18222 // CHECK16-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
18223 // CHECK16-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
18224 // CHECK16-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
18225 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18226 // CHECK16-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
18227 // CHECK16-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
18228 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
18229 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
18230 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_init(i32 [[NVPTX_NUM_THREADS]], i16 0)
18231 // CHECK16-NEXT:    br label [[DOTEXECUTE:%.*]]
18232 // CHECK16:       .execute:
18233 // CHECK16-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
18234 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18235 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
18236 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
18237 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[V_ADDR]], align 4
18238 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
18239 // CHECK16-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]], [1000 x i32]* [[TMP0]], i32* [[TMP4]]) #[[ATTR3]]
18240 // CHECK16-NEXT:    br label [[DOTOMP_DEINIT:%.*]]
18241 // CHECK16:       .omp.deinit:
18242 // CHECK16-NEXT:    call void @__kmpc_spmd_kernel_deinit_v2(i16 0)
18243 // CHECK16-NEXT:    br label [[DOTEXIT:%.*]]
18244 // CHECK16:       .exit:
18245 // CHECK16-NEXT:    ret void
18246 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__10
18247 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
18248 // CHECK16-NEXT:  entry:
18249 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18250 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18251 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18252 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
18253 // CHECK16-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
18254 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18255 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18256 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18257 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18258 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18259 // CHECK16-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18260 // CHECK16-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18261 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18262 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18263 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
18264 // CHECK16-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
18265 // CHECK16-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
18266 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18267 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18268 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18269 // CHECK16-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
18270 // CHECK16-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
18271 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
18272 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
18273 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
18274 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18275 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
18276 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18277 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18278 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18279 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
18280 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18281 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
18282 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18283 // CHECK16:       omp.precond.then:
18284 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18285 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18286 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
18287 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18288 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18289 // CHECK16-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
18290 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18291 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
18292 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
18293 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18294 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18295 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
18296 // CHECK16-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18297 // CHECK16:       cond.true:
18298 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18299 // CHECK16-NEXT:    br label [[COND_END:%.*]]
18300 // CHECK16:       cond.false:
18301 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18302 // CHECK16-NEXT:    br label [[COND_END]]
18303 // CHECK16:       cond.end:
18304 // CHECK16-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
18305 // CHECK16-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18306 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18307 // CHECK16-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
18308 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18309 // CHECK16:       omp.inner.for.cond:
18310 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18311 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18312 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
18313 // CHECK16-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
18314 // CHECK16-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18315 // CHECK16:       omp.inner.for.body:
18316 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18317 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18318 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
18319 // CHECK16-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
18320 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
18321 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
18322 // CHECK16-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
18323 // CHECK16-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
18324 // CHECK16-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
18325 // CHECK16-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
18326 // CHECK16-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
18327 // CHECK16-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
18328 // CHECK16-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
18329 // CHECK16-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
18330 // CHECK16-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
18331 // CHECK16-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
18332 // CHECK16-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
18333 // CHECK16-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
18334 // CHECK16-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
18335 // CHECK16-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
18336 // CHECK16-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
18337 // CHECK16-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18338 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
18339 // CHECK16-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
18340 // CHECK16-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
18341 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18342 // CHECK16:       omp.inner.for.inc:
18343 // CHECK16-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18344 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18345 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
18346 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
18347 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18348 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18349 // CHECK16-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
18350 // CHECK16-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
18351 // CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18352 // CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18353 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
18354 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
18355 // CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18356 // CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18357 // CHECK16-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
18358 // CHECK16-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
18359 // CHECK16:       cond.true10:
18360 // CHECK16-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18361 // CHECK16-NEXT:    br label [[COND_END12:%.*]]
18362 // CHECK16:       cond.false11:
18363 // CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18364 // CHECK16-NEXT:    br label [[COND_END12]]
18365 // CHECK16:       cond.end12:
18366 // CHECK16-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
18367 // CHECK16-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
18368 // CHECK16-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18369 // CHECK16-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
18370 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
18371 // CHECK16:       omp.inner.for.end:
18372 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18373 // CHECK16:       omp.loop.exit:
18374 // CHECK16-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18375 // CHECK16-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
18376 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP44]])
18377 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18378 // CHECK16:       omp.precond.end:
18379 // CHECK16-NEXT:    ret void
18380 // CHECK16-LABEL: define {{[^@]+}}@__omp_outlined__11
18381 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
18382 // CHECK16-NEXT:  entry:
18383 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18384 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18385 // CHECK16-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18386 // CHECK16-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18387 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18388 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
18389 // CHECK16-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
18390 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18391 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18392 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18393 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18394 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18395 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18396 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18397 // CHECK16-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18398 // CHECK16-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18399 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
18400 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18401 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18402 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18403 // CHECK16-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18404 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18405 // CHECK16-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
18406 // CHECK16-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
18407 // CHECK16-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
18408 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
18409 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
18410 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18411 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
18412 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18413 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18414 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18415 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
18416 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18417 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
18418 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18419 // CHECK16:       omp.precond.then:
18420 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18421 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18422 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
18423 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18424 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18425 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
18426 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
18427 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18428 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18429 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18430 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
18431 // CHECK16-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18432 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18433 // CHECK16-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
18434 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18435 // CHECK16:       omp.inner.for.cond:
18436 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18437 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18438 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
18439 // CHECK16-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18440 // CHECK16:       omp.inner.for.body:
18441 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18442 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
18443 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18444 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
18445 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
18446 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
18447 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
18448 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
18449 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
18450 // CHECK16-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
18451 // CHECK16-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
18452 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18453 // CHECK16:       omp.body.continue:
18454 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18455 // CHECK16:       omp.inner.for.inc:
18456 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18457 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18458 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
18459 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
18460 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]]
18461 // CHECK16:       omp.inner.for.end:
18462 // CHECK16-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18463 // CHECK16:       omp.loop.exit:
18464 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18465 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
18466 // CHECK16-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]])
18467 // CHECK16-NEXT:    br label [[OMP_PRECOND_END]]
18468 // CHECK16:       omp.precond.end:
18469 // CHECK16-NEXT:    ret void
18470 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28
18471 // CHECK1-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
18472 // CHECK1-NEXT:  entry:
18473 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
18474 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
18475 // CHECK1-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
18476 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
18477 // CHECK1-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
18478 // CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
18479 // CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
18480 // CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
18481 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
18482 // CHECK1-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
18483 // CHECK1-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
18484 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
18485 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
18486 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
18487 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 true, i1 false, i1 false)
18488 // CHECK1-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
18489 // CHECK1-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
18490 // CHECK1:       user_code.entry:
18491 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]])
18492 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
18493 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
18494 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV2]], align 4
18495 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
18496 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV1]], align 8
18497 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[L_CASTED]] to i32*
18498 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[CONV3]], align 4
18499 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[L_CASTED]], align 8
18500 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
18501 // CHECK1-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [1000 x i32]* [[TMP0]], i64 [[TMP6]]) #[[ATTR1:[0-9]+]]
18502 // CHECK1-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
18503 // CHECK1-NEXT:    ret void
18504 // CHECK1:       worker.exit:
18505 // CHECK1-NEXT:    ret void
18506 //
18507 //
18508 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
18509 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
18510 // CHECK1-NEXT:  entry:
18511 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18512 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18513 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
18514 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
18515 // CHECK1-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
18516 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18517 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18518 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18519 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
18520 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
18521 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18522 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18523 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18524 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18525 // CHECK1-NEXT:    [[I5:%.*]] = alloca i32, align 4
18526 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
18527 // CHECK1-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
18528 // CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
18529 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18530 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18531 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
18532 // CHECK1-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
18533 // CHECK1-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
18534 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
18535 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
18536 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
18537 // CHECK1-NEXT:    [[L2:%.*]] = call i8* @__kmpc_alloc_shared(i64 4)
18538 // CHECK1-NEXT:    [[L_ON_STACK:%.*]] = bitcast i8* [[L2]] to i32*
18539 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
18540 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
18541 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18542 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
18543 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18544 // CHECK1-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
18545 // CHECK1-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
18546 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
18547 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18548 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
18549 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18550 // CHECK1:       omp.precond.then:
18551 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18552 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
18553 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
18554 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18555 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18556 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18557 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
18558 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
18559 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18560 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
18561 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
18562 // CHECK1-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18563 // CHECK1:       cond.true:
18564 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
18565 // CHECK1-NEXT:    br label [[COND_END:%.*]]
18566 // CHECK1:       cond.false:
18567 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18568 // CHECK1-NEXT:    br label [[COND_END]]
18569 // CHECK1:       cond.end:
18570 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
18571 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18572 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18573 // CHECK1-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
18574 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18575 // CHECK1:       omp.inner.for.cond:
18576 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18577 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
18578 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
18579 // CHECK1-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
18580 // CHECK1-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18581 // CHECK1:       omp.inner.for.body:
18582 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18583 // CHECK1-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
18584 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18585 // CHECK1-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
18586 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
18587 // CHECK1-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
18588 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[CONV8]], align 4
18589 // CHECK1-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
18590 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV1]], align 8
18591 // CHECK1-NEXT:    [[CONV9:%.*]] = bitcast i64* [[L_CASTED]] to i32*
18592 // CHECK1-NEXT:    store i32 [[TMP20]], i32* [[CONV9]], align 4
18593 // CHECK1-NEXT:    [[TMP21:%.*]] = load i64, i64* [[L_CASTED]], align 8
18594 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
18595 // CHECK1-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP15]] to i8*
18596 // CHECK1-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
18597 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
18598 // CHECK1-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP17]] to i8*
18599 // CHECK1-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
18600 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
18601 // CHECK1-NEXT:    [[TMP27:%.*]] = inttoptr i64 [[TMP19]] to i8*
18602 // CHECK1-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
18603 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
18604 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
18605 // CHECK1-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
18606 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
18607 // CHECK1-NEXT:    [[TMP31:%.*]] = inttoptr i64 [[TMP21]] to i8*
18608 // CHECK1-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 8
18609 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18610 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
18611 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
18612 // CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP33]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i64)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP34]], i64 5)
18613 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18614 // CHECK1:       omp.inner.for.inc:
18615 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18616 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18617 // CHECK1-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
18618 // CHECK1-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
18619 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18620 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18621 // CHECK1-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
18622 // CHECK1-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
18623 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18624 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18625 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
18626 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
18627 // CHECK1-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18628 // CHECK1-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
18629 // CHECK1-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP41]], [[TMP42]]
18630 // CHECK1-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
18631 // CHECK1:       cond.true14:
18632 // CHECK1-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
18633 // CHECK1-NEXT:    br label [[COND_END16:%.*]]
18634 // CHECK1:       cond.false15:
18635 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18636 // CHECK1-NEXT:    br label [[COND_END16]]
18637 // CHECK1:       cond.end16:
18638 // CHECK1-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP43]], [[COND_TRUE14]] ], [ [[TMP44]], [[COND_FALSE15]] ]
18639 // CHECK1-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
18640 // CHECK1-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18641 // CHECK1-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV]], align 4
18642 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
18643 // CHECK1:       omp.inner.for.end:
18644 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18645 // CHECK1:       omp.loop.exit:
18646 // CHECK1-NEXT:    [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18647 // CHECK1-NEXT:    [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
18648 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP47]])
18649 // CHECK1-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18650 // CHECK1-NEXT:    [[TMP49:%.*]] = icmp ne i32 [[TMP48]], 0
18651 // CHECK1-NEXT:    br i1 [[TMP49]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
18652 // CHECK1:       .omp.lastprivate.then:
18653 // CHECK1-NEXT:    [[TMP50:%.*]] = load i32, i32* [[CONV1]], align 8
18654 // CHECK1-NEXT:    store i32 [[TMP50]], i32* [[CONV1]], align 8
18655 // CHECK1-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
18656 // CHECK1:       .omp.lastprivate.done:
18657 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
18658 // CHECK1:       omp.precond.end:
18659 // CHECK1-NEXT:    call void @__kmpc_free_shared(i8* [[L2]], i64 4)
18660 // CHECK1-NEXT:    ret void
18661 //
18662 //
18663 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
18664 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
18665 // CHECK1-NEXT:  entry:
18666 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18667 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18668 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
18669 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
18670 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
18671 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
18672 // CHECK1-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
18673 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18674 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18675 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18676 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
18677 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
18678 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18679 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18680 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18681 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18682 // CHECK1-NEXT:    [[I6:%.*]] = alloca i32, align 4
18683 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18684 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18685 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
18686 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
18687 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
18688 // CHECK1-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
18689 // CHECK1-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
18690 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
18691 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
18692 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
18693 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
18694 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
18695 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18696 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
18697 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18698 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
18699 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
18700 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
18701 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18702 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
18703 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18704 // CHECK1:       omp.precond.then:
18705 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18706 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18707 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
18708 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
18709 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
18710 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
18711 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
18712 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
18713 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
18714 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18715 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18716 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18717 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
18718 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
18719 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
18720 // CHECK1:       omp.dispatch.cond:
18721 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18722 // CHECK1-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
18723 // CHECK1-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP10]] to i32
18724 // CHECK1-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP9]], [[CONV7]]
18725 // CHECK1-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18726 // CHECK1:       cond.true:
18727 // CHECK1-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
18728 // CHECK1-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP11]] to i32
18729 // CHECK1-NEXT:    br label [[COND_END:%.*]]
18730 // CHECK1:       cond.false:
18731 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18732 // CHECK1-NEXT:    br label [[COND_END]]
18733 // CHECK1:       cond.end:
18734 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
18735 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18736 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18737 // CHECK1-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
18738 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18739 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18740 // CHECK1-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
18741 // CHECK1-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
18742 // CHECK1:       omp.dispatch.body:
18743 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18744 // CHECK1:       omp.inner.for.cond:
18745 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18746 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18747 // CHECK1-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
18748 // CHECK1-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18749 // CHECK1:       omp.inner.for.body:
18750 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18751 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
18752 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18753 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4
18754 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I6]], align 4
18755 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
18756 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
18757 // CHECK1-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
18758 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I6]], align 4
18759 // CHECK1-NEXT:    store i32 [[TMP20]], i32* [[CONV1]], align 8
18760 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18761 // CHECK1:       omp.body.continue:
18762 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18763 // CHECK1:       omp.inner.for.inc:
18764 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18765 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP21]], 1
18766 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
18767 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
18768 // CHECK1:       omp.inner.for.end:
18769 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
18770 // CHECK1:       omp.dispatch.inc:
18771 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18772 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18773 // CHECK1-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
18774 // CHECK1-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_LB]], align 4
18775 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18776 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18777 // CHECK1-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
18778 // CHECK1-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_UB]], align 4
18779 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
18780 // CHECK1:       omp.dispatch.end:
18781 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18782 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
18783 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP27]])
18784 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18785 // CHECK1-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
18786 // CHECK1-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
18787 // CHECK1:       .omp.lastprivate.then:
18788 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[CONV1]], align 8
18789 // CHECK1-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
18790 // CHECK1-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
18791 // CHECK1:       .omp.lastprivate.done:
18792 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
18793 // CHECK1:       omp.precond.end:
18794 // CHECK1-NEXT:    ret void
18795 //
18796 //
18797 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
18798 // CHECK1-SAME: (i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR2:[0-9]+]] {
18799 // CHECK1-NEXT:  entry:
18800 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
18801 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
18802 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
18803 // CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
18804 // CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
18805 // CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
18806 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
18807 // CHECK1-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
18808 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
18809 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
18810 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
18811 // CHECK1-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
18812 // CHECK1-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
18813 // CHECK1:       user_code.entry:
18814 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
18815 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
18816 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
18817 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
18818 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
18819 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
18820 // CHECK1-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [1000 x i16]* [[TMP0]]) #[[ATTR1]]
18821 // CHECK1-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
18822 // CHECK1-NEXT:    ret void
18823 // CHECK1:       worker.exit:
18824 // CHECK1-NEXT:    ret void
18825 //
18826 //
18827 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__2
18828 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
18829 // CHECK1-NEXT:  entry:
18830 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18831 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18832 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
18833 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
18834 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18835 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18836 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18837 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18838 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
18839 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18840 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18841 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18842 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18843 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
18844 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
18845 // CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
18846 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18847 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18848 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
18849 // CHECK1-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
18850 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
18851 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
18852 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
18853 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
18854 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18855 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
18856 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18857 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18858 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18859 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
18860 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18861 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
18862 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18863 // CHECK1:       omp.precond.then:
18864 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18865 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18866 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
18867 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18868 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18869 // CHECK1-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
18870 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18871 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
18872 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
18873 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18874 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18875 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
18876 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18877 // CHECK1:       cond.true:
18878 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18879 // CHECK1-NEXT:    br label [[COND_END:%.*]]
18880 // CHECK1:       cond.false:
18881 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18882 // CHECK1-NEXT:    br label [[COND_END]]
18883 // CHECK1:       cond.end:
18884 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
18885 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18886 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18887 // CHECK1-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
18888 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18889 // CHECK1:       omp.inner.for.cond:
18890 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18891 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18892 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
18893 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
18894 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18895 // CHECK1:       omp.inner.for.body:
18896 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18897 // CHECK1-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
18898 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18899 // CHECK1-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
18900 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
18901 // CHECK1-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
18902 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
18903 // CHECK1-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
18904 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
18905 // CHECK1-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to i8*
18906 // CHECK1-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 8
18907 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
18908 // CHECK1-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to i8*
18909 // CHECK1-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
18910 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
18911 // CHECK1-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to i8*
18912 // CHECK1-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
18913 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
18914 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
18915 // CHECK1-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
18916 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18917 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
18918 // CHECK1-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
18919 // CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP29]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP30]], i64 4)
18920 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18921 // CHECK1:       omp.inner.for.inc:
18922 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18923 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18924 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
18925 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
18926 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18927 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18928 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
18929 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
18930 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18931 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18932 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
18933 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
18934 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18935 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18936 // CHECK1-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
18937 // CHECK1-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
18938 // CHECK1:       cond.true11:
18939 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18940 // CHECK1-NEXT:    br label [[COND_END13:%.*]]
18941 // CHECK1:       cond.false12:
18942 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18943 // CHECK1-NEXT:    br label [[COND_END13]]
18944 // CHECK1:       cond.end13:
18945 // CHECK1-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE11]] ], [ [[TMP40]], [[COND_FALSE12]] ]
18946 // CHECK1-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
18947 // CHECK1-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18948 // CHECK1-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV]], align 4
18949 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
18950 // CHECK1:       omp.inner.for.end:
18951 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18952 // CHECK1:       omp.loop.exit:
18953 // CHECK1-NEXT:    [[TMP42:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18954 // CHECK1-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP42]], align 4
18955 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP43]])
18956 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
18957 // CHECK1:       omp.precond.end:
18958 // CHECK1-NEXT:    ret void
18959 //
18960 //
18961 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__3
18962 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
18963 // CHECK1-NEXT:  entry:
18964 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18965 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18966 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
18967 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
18968 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
18969 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
18970 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18971 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18972 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18973 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18974 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
18975 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18976 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18977 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18978 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18979 // CHECK1-NEXT:    [[I5:%.*]] = alloca i32, align 4
18980 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18981 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18982 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
18983 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
18984 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
18985 // CHECK1-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
18986 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
18987 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
18988 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
18989 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
18990 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18991 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
18992 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18993 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18994 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18995 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
18996 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18997 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
18998 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18999 // CHECK1:       omp.precond.then:
19000 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19001 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19002 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
19003 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19004 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
19005 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19006 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
19007 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
19008 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
19009 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19010 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19011 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19012 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
19013 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19014 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19015 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
19016 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19017 // CHECK1:       omp.inner.for.cond:
19018 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19019 // CHECK1-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
19020 // CHECK1-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19021 // CHECK1-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
19022 // CHECK1-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19023 // CHECK1:       omp.inner.for.body:
19024 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19025 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
19026 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19027 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
19028 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I5]], align 4
19029 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
19030 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i64 0, i64 [[IDXPROM]]
19031 // CHECK1-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
19032 // CHECK1-NEXT:    [[CONV8:%.*]] = sext i16 [[TMP14]] to i32
19033 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 [[CONV8]], 1
19034 // CHECK1-NEXT:    [[CONV10:%.*]] = trunc i32 [[ADD9]] to i16
19035 // CHECK1-NEXT:    store i16 [[CONV10]], i16* [[ARRAYIDX]], align 2
19036 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19037 // CHECK1:       omp.body.continue:
19038 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19039 // CHECK1:       omp.inner.for.inc:
19040 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19041 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19042 // CHECK1-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
19043 // CHECK1-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
19044 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19045 // CHECK1:       omp.inner.for.end:
19046 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19047 // CHECK1:       omp.loop.exit:
19048 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19049 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
19050 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]])
19051 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
19052 // CHECK1:       omp.precond.end:
19053 // CHECK1-NEXT:    ret void
19054 //
19055 //
19056 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
19057 // CHECK1-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19058 // CHECK1-NEXT:  entry:
19059 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19060 // CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
19061 // CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
19062 // CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
19063 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
19064 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
19065 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
19066 // CHECK1-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
19067 // CHECK1-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
19068 // CHECK1:       user_code.entry:
19069 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
19070 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
19071 // CHECK1-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR1]]
19072 // CHECK1-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
19073 // CHECK1-NEXT:    ret void
19074 // CHECK1:       worker.exit:
19075 // CHECK1-NEXT:    ret void
19076 //
19077 //
19078 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__4
19079 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19080 // CHECK1-NEXT:  entry:
19081 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19082 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19083 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19084 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19085 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19086 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19087 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19088 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19089 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19090 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19091 // CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
19092 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19093 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19094 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
19095 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
19096 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19097 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
19098 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19099 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19100 // CHECK1-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
19101 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19102 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
19103 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
19104 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19105 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
19106 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19107 // CHECK1:       cond.true:
19108 // CHECK1-NEXT:    br label [[COND_END:%.*]]
19109 // CHECK1:       cond.false:
19110 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19111 // CHECK1-NEXT:    br label [[COND_END]]
19112 // CHECK1:       cond.end:
19113 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
19114 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19115 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19116 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
19117 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19118 // CHECK1:       omp.inner.for.cond:
19119 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19120 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
19121 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19122 // CHECK1:       omp.inner.for.body:
19123 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19124 // CHECK1-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
19125 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19126 // CHECK1-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
19127 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
19128 // CHECK1-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to i8*
19129 // CHECK1-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
19130 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
19131 // CHECK1-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to i8*
19132 // CHECK1-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
19133 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
19134 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
19135 // CHECK1-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
19136 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
19137 // CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP17]], i64 3)
19138 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19139 // CHECK1:       omp.inner.for.inc:
19140 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19141 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19142 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
19143 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
19144 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19145 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19146 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
19147 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
19148 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19149 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19150 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
19151 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
19152 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19153 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP24]], 9
19154 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
19155 // CHECK1:       cond.true5:
19156 // CHECK1-NEXT:    br label [[COND_END7:%.*]]
19157 // CHECK1:       cond.false6:
19158 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19159 // CHECK1-NEXT:    br label [[COND_END7]]
19160 // CHECK1:       cond.end7:
19161 // CHECK1-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP25]], [[COND_FALSE6]] ]
19162 // CHECK1-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
19163 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19164 // CHECK1-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV]], align 4
19165 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19166 // CHECK1:       omp.inner.for.end:
19167 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19168 // CHECK1:       omp.loop.exit:
19169 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
19170 // CHECK1-NEXT:    ret void
19171 //
19172 //
19173 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__5
19174 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19175 // CHECK1-NEXT:  entry:
19176 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19177 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19178 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
19179 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
19180 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19181 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19182 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19183 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19184 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19185 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19186 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19187 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19188 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19189 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19190 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19191 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19192 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
19193 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
19194 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19195 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19196 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19197 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
19198 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19199 // CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
19200 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
19201 // CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
19202 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19203 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19204 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19205 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
19206 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19207 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19208 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
19209 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19210 // CHECK1:       omp.inner.for.cond:
19211 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19212 // CHECK1-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
19213 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19214 // CHECK1-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
19215 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19216 // CHECK1:       omp.inner.for.body:
19217 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19218 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
19219 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19220 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
19221 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
19222 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
19223 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
19224 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
19225 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
19226 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
19227 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19228 // CHECK1:       omp.body.continue:
19229 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19230 // CHECK1:       omp.inner.for.inc:
19231 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19232 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19233 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
19234 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
19235 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19236 // CHECK1:       omp.inner.for.end:
19237 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19238 // CHECK1:       omp.loop.exit:
19239 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
19240 // CHECK1-NEXT:    ret void
19241 //
19242 //
19243 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
19244 // CHECK1-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
19245 // CHECK1-NEXT:  entry:
19246 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
19247 // CHECK1-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
19248 // CHECK1-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
19249 // CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
19250 // CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
19251 // CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
19252 // CHECK1-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
19253 // CHECK1-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
19254 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
19255 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
19256 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
19257 // CHECK1-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
19258 // CHECK1-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
19259 // CHECK1:       user_code.entry:
19260 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
19261 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
19262 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[F_CASTED]] to i32*
19263 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
19264 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[F_CASTED]], align 8
19265 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
19266 // CHECK1-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i64 [[TMP4]]) #[[ATTR1]]
19267 // CHECK1-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
19268 // CHECK1-NEXT:    ret void
19269 // CHECK1:       worker.exit:
19270 // CHECK1-NEXT:    ret void
19271 //
19272 //
19273 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__6
19274 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
19275 // CHECK1-NEXT:  entry:
19276 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19277 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19278 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
19279 // CHECK1-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
19280 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19281 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19282 // CHECK1-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
19283 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19284 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19285 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19286 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19287 // CHECK1-NEXT:    [[K:%.*]] = alloca i32, align 4
19288 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19289 // CHECK1-NEXT:    [[J:%.*]] = alloca i32, align 4
19290 // CHECK1-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
19291 // CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
19292 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19293 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19294 // CHECK1-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
19295 // CHECK1-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
19296 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
19297 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
19298 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19299 // CHECK1-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
19300 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19301 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19302 // CHECK1-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
19303 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19304 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
19305 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
19306 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19307 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
19308 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19309 // CHECK1:       cond.true:
19310 // CHECK1-NEXT:    br label [[COND_END:%.*]]
19311 // CHECK1:       cond.false:
19312 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19313 // CHECK1-NEXT:    br label [[COND_END]]
19314 // CHECK1:       cond.end:
19315 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
19316 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19317 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19318 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
19319 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19320 // CHECK1:       omp.inner.for.cond:
19321 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19322 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
19323 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19324 // CHECK1:       omp.inner.for.body:
19325 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19326 // CHECK1-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
19327 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19328 // CHECK1-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
19329 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[CONV]], align 8
19330 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[F_CASTED]] to i32*
19331 // CHECK1-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
19332 // CHECK1-NEXT:    [[TMP12:%.*]] = load i64, i64* [[F_CASTED]], align 8
19333 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
19334 // CHECK1-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to i8*
19335 // CHECK1-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
19336 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
19337 // CHECK1-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to i8*
19338 // CHECK1-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
19339 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
19340 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
19341 // CHECK1-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
19342 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
19343 // CHECK1-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP12]] to i8*
19344 // CHECK1-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
19345 // CHECK1-NEXT:    [[TMP21:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
19346 // CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x [10 x i32]]*, i64)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP21]], i64 4)
19347 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19348 // CHECK1:       omp.inner.for.inc:
19349 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19350 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19351 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
19352 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
19353 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19354 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19355 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
19356 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_LB]], align 4
19357 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19358 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19359 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
19360 // CHECK1-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_COMB_UB]], align 4
19361 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19362 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP28]], 99
19363 // CHECK1-NEXT:    br i1 [[CMP6]], label [[COND_TRUE7:%.*]], label [[COND_FALSE8:%.*]]
19364 // CHECK1:       cond.true7:
19365 // CHECK1-NEXT:    br label [[COND_END9:%.*]]
19366 // CHECK1:       cond.false8:
19367 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19368 // CHECK1-NEXT:    br label [[COND_END9]]
19369 // CHECK1:       cond.end9:
19370 // CHECK1-NEXT:    [[COND10:%.*]] = phi i32 [ 99, [[COND_TRUE7]] ], [ [[TMP29]], [[COND_FALSE8]] ]
19371 // CHECK1-NEXT:    store i32 [[COND10]], i32* [[DOTOMP_COMB_UB]], align 4
19372 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19373 // CHECK1-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV]], align 4
19374 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19375 // CHECK1:       omp.inner.for.end:
19376 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19377 // CHECK1:       omp.loop.exit:
19378 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
19379 // CHECK1-NEXT:    ret void
19380 //
19381 //
19382 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__7
19383 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
19384 // CHECK1-NEXT:  entry:
19385 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19386 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19387 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
19388 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
19389 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
19390 // CHECK1-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
19391 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19392 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19393 // CHECK1-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
19394 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19395 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19396 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19397 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19398 // CHECK1-NEXT:    [[K:%.*]] = alloca i32, align 4
19399 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19400 // CHECK1-NEXT:    [[J:%.*]] = alloca i32, align 4
19401 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19402 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19403 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19404 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19405 // CHECK1-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
19406 // CHECK1-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
19407 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
19408 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
19409 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19410 // CHECK1-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
19411 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19412 // CHECK1-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32
19413 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19414 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP2]] to i32
19415 // CHECK1-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
19416 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
19417 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19418 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19419 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19420 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
19421 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19422 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19423 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
19424 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19425 // CHECK1:       omp.inner.for.cond:
19426 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19427 // CHECK1-NEXT:    [[CONV4:%.*]] = sext i32 [[TMP6]] to i64
19428 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19429 // CHECK1-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV4]], [[TMP7]]
19430 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19431 // CHECK1:       omp.inner.for.body:
19432 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19433 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
19434 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
19435 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19436 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
19437 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19438 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19439 // CHECK1-NEXT:    [[DIV5:%.*]] = sdiv i32 [[TMP10]], 10
19440 // CHECK1-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 10
19441 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL6]]
19442 // CHECK1-NEXT:    [[MUL7:%.*]] = mul nsw i32 [[SUB]], 1
19443 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL7]]
19444 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[J]], align 4
19445 // CHECK1-NEXT:    store i32 10, i32* [[K]], align 4
19446 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
19447 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
19448 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8
19449 // CHECK1-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
19450 // CHECK1-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP11]], [[MUL9]]
19451 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
19452 // CHECK1-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD10]], [[TMP14]]
19453 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
19454 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
19455 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
19456 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
19457 // CHECK1-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP16]] to i64
19458 // CHECK1-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM12]]
19459 // CHECK1-NEXT:    store i32 [[ADD11]], i32* [[ARRAYIDX13]], align 4
19460 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19461 // CHECK1:       omp.body.continue:
19462 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19463 // CHECK1:       omp.inner.for.inc:
19464 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19465 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19466 // CHECK1-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
19467 // CHECK1-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
19468 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19469 // CHECK1:       omp.inner.for.end:
19470 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19471 // CHECK1:       omp.loop.exit:
19472 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
19473 // CHECK1-NEXT:    ret void
19474 //
19475 //
19476 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l52
19477 // CHECK1-SAME: (i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
19478 // CHECK1-NEXT:  entry:
19479 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19480 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
19481 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19482 // CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
19483 // CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
19484 // CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
19485 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19486 // CHECK1-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
19487 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19488 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
19489 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
19490 // CHECK1-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
19491 // CHECK1-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
19492 // CHECK1:       user_code.entry:
19493 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
19494 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
19495 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19496 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
19497 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
19498 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
19499 // CHECK1-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR1]]
19500 // CHECK1-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
19501 // CHECK1-NEXT:    ret void
19502 // CHECK1:       worker.exit:
19503 // CHECK1-NEXT:    ret void
19504 //
19505 //
19506 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__8
19507 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
19508 // CHECK1-NEXT:  entry:
19509 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19510 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19511 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19512 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
19513 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
19514 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19515 // CHECK1-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
19516 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19517 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
19518 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
19519 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19520 // CHECK1-NEXT:    [[J:%.*]] = alloca i32, align 4
19521 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
19522 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
19523 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
19524 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19525 // CHECK1-NEXT:    [[I10:%.*]] = alloca i32, align 4
19526 // CHECK1-NEXT:    [[J11:%.*]] = alloca i32, align 4
19527 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19528 // CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
19529 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19530 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19531 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19532 // CHECK1-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
19533 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19534 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
19535 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
19536 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
19537 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
19538 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
19539 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19540 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
19541 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19542 // CHECK1-NEXT:    [[CONV4:%.*]] = sext i32 [[DIV]] to i64
19543 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19544 // CHECK1-NEXT:    [[SUB5:%.*]] = sub nsw i32 [[TMP4]], 0
19545 // CHECK1-NEXT:    [[DIV6:%.*]] = sdiv i32 [[SUB5]], 1
19546 // CHECK1-NEXT:    [[CONV7:%.*]] = sext i32 [[DIV6]] to i64
19547 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV4]], [[CONV7]]
19548 // CHECK1-NEXT:    [[SUB8:%.*]] = sub nsw i64 [[MUL]], 1
19549 // CHECK1-NEXT:    store i64 [[SUB8]], i64* [[DOTCAPTURE_EXPR_3]], align 8
19550 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
19551 // CHECK1-NEXT:    store i32 0, i32* [[J]], align 4
19552 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19553 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
19554 // CHECK1-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
19555 // CHECK1:       land.lhs.true:
19556 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19557 // CHECK1-NEXT:    [[CMP9:%.*]] = icmp slt i32 0, [[TMP6]]
19558 // CHECK1-NEXT:    br i1 [[CMP9]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
19559 // CHECK1:       omp.precond.then:
19560 // CHECK1-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
19561 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
19562 // CHECK1-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
19563 // CHECK1-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
19564 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19565 // CHECK1-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
19566 // CHECK1-NEXT:    [[CONV12:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
19567 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19568 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19569 // CHECK1-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV12]])
19570 // CHECK1-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
19571 // CHECK1-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
19572 // CHECK1-NEXT:    [[CMP13:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
19573 // CHECK1-NEXT:    br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19574 // CHECK1:       cond.true:
19575 // CHECK1-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
19576 // CHECK1-NEXT:    br label [[COND_END:%.*]]
19577 // CHECK1:       cond.false:
19578 // CHECK1-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
19579 // CHECK1-NEXT:    br label [[COND_END]]
19580 // CHECK1:       cond.end:
19581 // CHECK1-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
19582 // CHECK1-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
19583 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
19584 // CHECK1-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
19585 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19586 // CHECK1:       omp.inner.for.cond:
19587 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
19588 // CHECK1-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
19589 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
19590 // CHECK1-NEXT:    [[CMP14:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
19591 // CHECK1-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19592 // CHECK1:       omp.inner.for.body:
19593 // CHECK1-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
19594 // CHECK1-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
19595 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
19596 // CHECK1-NEXT:    [[CONV15:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19597 // CHECK1-NEXT:    store i32 [[TMP19]], i32* [[CONV15]], align 4
19598 // CHECK1-NEXT:    [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
19599 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
19600 // CHECK1-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP17]] to i8*
19601 // CHECK1-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
19602 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
19603 // CHECK1-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to i8*
19604 // CHECK1-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
19605 // CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
19606 // CHECK1-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to i8*
19607 // CHECK1-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
19608 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
19609 // CHECK1-NEXT:    [[TMP28:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
19610 // CHECK1-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
19611 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19612 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
19613 // CHECK1-NEXT:    [[TMP31:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
19614 // CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP31]], i64 4)
19615 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19616 // CHECK1:       omp.inner.for.inc:
19617 // CHECK1-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
19618 // CHECK1-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
19619 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP32]], [[TMP33]]
19620 // CHECK1-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_IV]], align 8
19621 // CHECK1-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
19622 // CHECK1-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
19623 // CHECK1-NEXT:    [[ADD17:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
19624 // CHECK1-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_COMB_LB]], align 8
19625 // CHECK1-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
19626 // CHECK1-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
19627 // CHECK1-NEXT:    [[ADD18:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
19628 // CHECK1-NEXT:    store i64 [[ADD18]], i64* [[DOTOMP_COMB_UB]], align 8
19629 // CHECK1-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
19630 // CHECK1-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
19631 // CHECK1-NEXT:    [[CMP19:%.*]] = icmp sgt i64 [[TMP38]], [[TMP39]]
19632 // CHECK1-NEXT:    br i1 [[CMP19]], label [[COND_TRUE20:%.*]], label [[COND_FALSE21:%.*]]
19633 // CHECK1:       cond.true20:
19634 // CHECK1-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
19635 // CHECK1-NEXT:    br label [[COND_END22:%.*]]
19636 // CHECK1:       cond.false21:
19637 // CHECK1-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
19638 // CHECK1-NEXT:    br label [[COND_END22]]
19639 // CHECK1:       cond.end22:
19640 // CHECK1-NEXT:    [[COND23:%.*]] = phi i64 [ [[TMP40]], [[COND_TRUE20]] ], [ [[TMP41]], [[COND_FALSE21]] ]
19641 // CHECK1-NEXT:    store i64 [[COND23]], i64* [[DOTOMP_COMB_UB]], align 8
19642 // CHECK1-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
19643 // CHECK1-NEXT:    store i64 [[TMP42]], i64* [[DOTOMP_IV]], align 8
19644 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19645 // CHECK1:       omp.inner.for.end:
19646 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19647 // CHECK1:       omp.loop.exit:
19648 // CHECK1-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19649 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
19650 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP44]])
19651 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
19652 // CHECK1:       omp.precond.end:
19653 // CHECK1-NEXT:    ret void
19654 //
19655 //
19656 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__9
19657 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
19658 // CHECK1-NEXT:  entry:
19659 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19660 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19661 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
19662 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
19663 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19664 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
19665 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
19666 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19667 // CHECK1-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
19668 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19669 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
19670 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
19671 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19672 // CHECK1-NEXT:    [[J:%.*]] = alloca i32, align 4
19673 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
19674 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
19675 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
19676 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19677 // CHECK1-NEXT:    [[I10:%.*]] = alloca i32, align 4
19678 // CHECK1-NEXT:    [[J11:%.*]] = alloca i32, align 4
19679 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19680 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19681 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19682 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19683 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19684 // CHECK1-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
19685 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19686 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
19687 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
19688 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
19689 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
19690 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
19691 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19692 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
19693 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19694 // CHECK1-NEXT:    [[CONV4:%.*]] = sext i32 [[DIV]] to i64
19695 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19696 // CHECK1-NEXT:    [[SUB5:%.*]] = sub nsw i32 [[TMP4]], 0
19697 // CHECK1-NEXT:    [[DIV6:%.*]] = sdiv i32 [[SUB5]], 1
19698 // CHECK1-NEXT:    [[CONV7:%.*]] = sext i32 [[DIV6]] to i64
19699 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV4]], [[CONV7]]
19700 // CHECK1-NEXT:    [[SUB8:%.*]] = sub nsw i64 [[MUL]], 1
19701 // CHECK1-NEXT:    store i64 [[SUB8]], i64* [[DOTCAPTURE_EXPR_3]], align 8
19702 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
19703 // CHECK1-NEXT:    store i32 0, i32* [[J]], align 4
19704 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19705 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
19706 // CHECK1-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
19707 // CHECK1:       land.lhs.true:
19708 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19709 // CHECK1-NEXT:    [[CMP9:%.*]] = icmp slt i32 0, [[TMP6]]
19710 // CHECK1-NEXT:    br i1 [[CMP9]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
19711 // CHECK1:       omp.precond.then:
19712 // CHECK1-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
19713 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
19714 // CHECK1-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
19715 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19716 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19717 // CHECK1-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_LB]], align 8
19718 // CHECK1-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
19719 // CHECK1-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
19720 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19721 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19722 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19723 // CHECK1-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
19724 // CHECK1-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
19725 // CHECK1-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
19726 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19727 // CHECK1:       omp.inner.for.cond:
19728 // CHECK1-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
19729 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19730 // CHECK1-NEXT:    [[CMP12:%.*]] = icmp ule i64 [[TMP13]], [[TMP14]]
19731 // CHECK1-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19732 // CHECK1:       omp.inner.for.body:
19733 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
19734 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19735 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP16]], 0
19736 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
19737 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 1, [[DIV14]]
19738 // CHECK1-NEXT:    [[CONV16:%.*]] = sext i32 [[MUL15]] to i64
19739 // CHECK1-NEXT:    [[DIV17:%.*]] = sdiv i64 [[TMP15]], [[CONV16]]
19740 // CHECK1-NEXT:    [[MUL18:%.*]] = mul nsw i64 [[DIV17]], 1
19741 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL18]]
19742 // CHECK1-NEXT:    [[CONV19:%.*]] = trunc i64 [[ADD]] to i32
19743 // CHECK1-NEXT:    store i32 [[CONV19]], i32* [[I10]], align 4
19744 // CHECK1-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
19745 // CHECK1-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
19746 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19747 // CHECK1-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP19]], 0
19748 // CHECK1-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
19749 // CHECK1-NEXT:    [[MUL22:%.*]] = mul nsw i32 1, [[DIV21]]
19750 // CHECK1-NEXT:    [[CONV23:%.*]] = sext i32 [[MUL22]] to i64
19751 // CHECK1-NEXT:    [[DIV24:%.*]] = sdiv i64 [[TMP18]], [[CONV23]]
19752 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19753 // CHECK1-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[TMP20]], 0
19754 // CHECK1-NEXT:    [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1
19755 // CHECK1-NEXT:    [[MUL27:%.*]] = mul nsw i32 1, [[DIV26]]
19756 // CHECK1-NEXT:    [[CONV28:%.*]] = sext i32 [[MUL27]] to i64
19757 // CHECK1-NEXT:    [[MUL29:%.*]] = mul nsw i64 [[DIV24]], [[CONV28]]
19758 // CHECK1-NEXT:    [[SUB30:%.*]] = sub nsw i64 [[TMP17]], [[MUL29]]
19759 // CHECK1-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[SUB30]], 1
19760 // CHECK1-NEXT:    [[ADD32:%.*]] = add nsw i64 0, [[MUL31]]
19761 // CHECK1-NEXT:    [[CONV33:%.*]] = trunc i64 [[ADD32]] to i32
19762 // CHECK1-NEXT:    store i32 [[CONV33]], i32* [[J11]], align 4
19763 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I10]], align 4
19764 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J11]], align 4
19765 // CHECK1-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
19766 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I10]], align 4
19767 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
19768 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
19769 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J11]], align 4
19770 // CHECK1-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP24]] to i64
19771 // CHECK1-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM35]]
19772 // CHECK1-NEXT:    store i32 [[ADD34]], i32* [[ARRAYIDX36]], align 4
19773 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19774 // CHECK1:       omp.body.continue:
19775 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19776 // CHECK1:       omp.inner.for.inc:
19777 // CHECK1-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
19778 // CHECK1-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
19779 // CHECK1-NEXT:    [[ADD37:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
19780 // CHECK1-NEXT:    store i64 [[ADD37]], i64* [[DOTOMP_IV]], align 8
19781 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19782 // CHECK1:       omp.inner.for.end:
19783 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19784 // CHECK1:       omp.loop.exit:
19785 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19786 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
19787 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP28]])
19788 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
19789 // CHECK1:       omp.precond.end:
19790 // CHECK1-NEXT:    ret void
19791 //
19792 //
19793 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
19794 // CHECK1-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
19795 // CHECK1-NEXT:  entry:
19796 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19797 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
19798 // CHECK1-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
19799 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19800 // CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
19801 // CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
19802 // CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
19803 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19804 // CHECK1-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
19805 // CHECK1-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
19806 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19807 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
19808 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
19809 // CHECK1-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
19810 // CHECK1-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
19811 // CHECK1:       user_code.entry:
19812 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
19813 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
19814 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19815 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
19816 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
19817 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[V_ADDR]], align 8
19818 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
19819 // CHECK1-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [1000 x i32]* [[TMP0]], i32* [[TMP5]]) #[[ATTR1]]
19820 // CHECK1-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
19821 // CHECK1-NEXT:    ret void
19822 // CHECK1:       worker.exit:
19823 // CHECK1-NEXT:    ret void
19824 //
19825 //
19826 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__10
19827 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
19828 // CHECK1-NEXT:  entry:
19829 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19830 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19831 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19832 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
19833 // CHECK1-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
19834 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19835 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19836 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19837 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19838 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19839 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19840 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19841 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19842 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19843 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
19844 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19845 // CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
19846 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19847 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19848 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19849 // CHECK1-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
19850 // CHECK1-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
19851 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19852 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
19853 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
19854 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
19855 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19856 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
19857 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19858 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19859 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19860 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
19861 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19862 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
19863 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19864 // CHECK1:       omp.precond.then:
19865 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19866 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19867 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
19868 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19869 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19870 // CHECK1-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
19871 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19872 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
19873 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
19874 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19875 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19876 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
19877 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19878 // CHECK1:       cond.true:
19879 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19880 // CHECK1-NEXT:    br label [[COND_END:%.*]]
19881 // CHECK1:       cond.false:
19882 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19883 // CHECK1-NEXT:    br label [[COND_END]]
19884 // CHECK1:       cond.end:
19885 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
19886 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19887 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19888 // CHECK1-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
19889 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19890 // CHECK1:       omp.inner.for.cond:
19891 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19892 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19893 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
19894 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
19895 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19896 // CHECK1:       omp.inner.for.body:
19897 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19898 // CHECK1-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
19899 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19900 // CHECK1-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
19901 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
19902 // CHECK1-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19903 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
19904 // CHECK1-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
19905 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[V_ADDR]], align 8
19906 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
19907 // CHECK1-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to i8*
19908 // CHECK1-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
19909 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
19910 // CHECK1-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to i8*
19911 // CHECK1-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
19912 // CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
19913 // CHECK1-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to i8*
19914 // CHECK1-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
19915 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
19916 // CHECK1-NEXT:    [[TMP28:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
19917 // CHECK1-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
19918 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
19919 // CHECK1-NEXT:    [[TMP30:%.*]] = bitcast i32* [[TMP20]] to i8*
19920 // CHECK1-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
19921 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19922 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
19923 // CHECK1-NEXT:    [[TMP33:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
19924 // CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP33]], i64 5)
19925 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19926 // CHECK1:       omp.inner.for.inc:
19927 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19928 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19929 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
19930 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
19931 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19932 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19933 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
19934 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
19935 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19936 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19937 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
19938 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
19939 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19940 // CHECK1-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19941 // CHECK1-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
19942 // CHECK1-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
19943 // CHECK1:       cond.true11:
19944 // CHECK1-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19945 // CHECK1-NEXT:    br label [[COND_END13:%.*]]
19946 // CHECK1:       cond.false12:
19947 // CHECK1-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19948 // CHECK1-NEXT:    br label [[COND_END13]]
19949 // CHECK1:       cond.end13:
19950 // CHECK1-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE11]] ], [ [[TMP43]], [[COND_FALSE12]] ]
19951 // CHECK1-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
19952 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19953 // CHECK1-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
19954 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
19955 // CHECK1:       omp.inner.for.end:
19956 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19957 // CHECK1:       omp.loop.exit:
19958 // CHECK1-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19959 // CHECK1-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
19960 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP46]])
19961 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
19962 // CHECK1:       omp.precond.end:
19963 // CHECK1-NEXT:    ret void
19964 //
19965 //
19966 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__11
19967 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
19968 // CHECK1-NEXT:  entry:
19969 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19970 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19971 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
19972 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
19973 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19974 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
19975 // CHECK1-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
19976 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19977 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19978 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19979 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19980 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
19981 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19982 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19983 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19984 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19985 // CHECK1-NEXT:    [[I5:%.*]] = alloca i32, align 4
19986 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19987 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19988 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
19989 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
19990 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19991 // CHECK1-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
19992 // CHECK1-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
19993 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19994 // CHECK1-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
19995 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
19996 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
19997 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19998 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
19999 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20000 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20001 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20002 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
20003 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20004 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
20005 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20006 // CHECK1:       omp.precond.then:
20007 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20008 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20009 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
20010 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20011 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
20012 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20013 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
20014 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
20015 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
20016 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20017 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20018 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20019 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
20020 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20021 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20022 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
20023 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20024 // CHECK1:       omp.inner.for.cond:
20025 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20026 // CHECK1-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
20027 // CHECK1-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20028 // CHECK1-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
20029 // CHECK1-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20030 // CHECK1:       omp.inner.for.body:
20031 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20032 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
20033 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20034 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
20035 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 8
20036 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I5]], align 4
20037 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
20038 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 [[IDXPROM]]
20039 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
20040 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I5]], align 4
20041 // CHECK1-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP16]] to i64
20042 // CHECK1-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM8]]
20043 // CHECK1-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX9]], align 4
20044 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20045 // CHECK1:       omp.body.continue:
20046 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20047 // CHECK1:       omp.inner.for.inc:
20048 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20049 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20050 // CHECK1-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
20051 // CHECK1-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
20052 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
20053 // CHECK1:       omp.inner.for.end:
20054 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20055 // CHECK1:       omp.loop.exit:
20056 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20057 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
20058 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP20]])
20059 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
20060 // CHECK1:       omp.precond.end:
20061 // CHECK1-NEXT:    ret void
20062 //
20063 //
20064 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28
20065 // CHECK2-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
20066 // CHECK2-NEXT:  entry:
20067 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20068 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
20069 // CHECK2-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
20070 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20071 // CHECK2-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
20072 // CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
20073 // CHECK2-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
20074 // CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
20075 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20076 // CHECK2-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
20077 // CHECK2-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
20078 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20079 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
20080 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
20081 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 true, i1 false, i1 false)
20082 // CHECK2-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
20083 // CHECK2-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
20084 // CHECK2:       user_code.entry:
20085 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]])
20086 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
20087 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20088 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[CONV2]], align 4
20089 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
20090 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV1]], align 8
20091 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[L_CASTED]] to i32*
20092 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[CONV3]], align 4
20093 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[L_CASTED]], align 8
20094 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
20095 // CHECK2-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [1000 x i32]* [[TMP0]], i64 [[TMP6]]) #[[ATTR1:[0-9]+]]
20096 // CHECK2-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
20097 // CHECK2-NEXT:    ret void
20098 // CHECK2:       worker.exit:
20099 // CHECK2-NEXT:    ret void
20100 //
20101 //
20102 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
20103 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
20104 // CHECK2-NEXT:  entry:
20105 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20106 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20107 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20108 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
20109 // CHECK2-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
20110 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20111 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20112 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20113 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
20114 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20115 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20116 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20117 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20118 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20119 // CHECK2-NEXT:    [[I5:%.*]] = alloca i32, align 4
20120 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20121 // CHECK2-NEXT:    [[L_CASTED:%.*]] = alloca i64, align 8
20122 // CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
20123 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20124 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20125 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20126 // CHECK2-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
20127 // CHECK2-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
20128 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20129 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
20130 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
20131 // CHECK2-NEXT:    [[L2:%.*]] = call i8* @__kmpc_alloc_shared(i64 4)
20132 // CHECK2-NEXT:    [[L_ON_STACK:%.*]] = bitcast i8* [[L2]] to i32*
20133 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
20134 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
20135 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20136 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
20137 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20138 // CHECK2-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
20139 // CHECK2-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
20140 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
20141 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20142 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
20143 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20144 // CHECK2:       omp.precond.then:
20145 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20146 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20147 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
20148 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20149 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20150 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20151 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
20152 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
20153 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20154 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20155 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
20156 // CHECK2-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20157 // CHECK2:       cond.true:
20158 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20159 // CHECK2-NEXT:    br label [[COND_END:%.*]]
20160 // CHECK2:       cond.false:
20161 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20162 // CHECK2-NEXT:    br label [[COND_END]]
20163 // CHECK2:       cond.end:
20164 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
20165 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20166 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20167 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
20168 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20169 // CHECK2:       omp.inner.for.cond:
20170 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20171 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20172 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
20173 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
20174 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20175 // CHECK2:       omp.inner.for.body:
20176 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20177 // CHECK2-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
20178 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20179 // CHECK2-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
20180 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
20181 // CHECK2-NEXT:    [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20182 // CHECK2-NEXT:    store i32 [[TMP18]], i32* [[CONV8]], align 4
20183 // CHECK2-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
20184 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV1]], align 8
20185 // CHECK2-NEXT:    [[CONV9:%.*]] = bitcast i64* [[L_CASTED]] to i32*
20186 // CHECK2-NEXT:    store i32 [[TMP20]], i32* [[CONV9]], align 4
20187 // CHECK2-NEXT:    [[TMP21:%.*]] = load i64, i64* [[L_CASTED]], align 8
20188 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
20189 // CHECK2-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP15]] to i8*
20190 // CHECK2-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
20191 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
20192 // CHECK2-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP17]] to i8*
20193 // CHECK2-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
20194 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
20195 // CHECK2-NEXT:    [[TMP27:%.*]] = inttoptr i64 [[TMP19]] to i8*
20196 // CHECK2-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
20197 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
20198 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
20199 // CHECK2-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
20200 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
20201 // CHECK2-NEXT:    [[TMP31:%.*]] = inttoptr i64 [[TMP21]] to i8*
20202 // CHECK2-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 8
20203 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20204 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
20205 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
20206 // CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP33]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i64)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP34]], i64 5)
20207 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20208 // CHECK2:       omp.inner.for.inc:
20209 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20210 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20211 // CHECK2-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
20212 // CHECK2-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
20213 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20214 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20215 // CHECK2-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
20216 // CHECK2-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
20217 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20218 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20219 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
20220 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
20221 // CHECK2-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20222 // CHECK2-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20223 // CHECK2-NEXT:    [[CMP13:%.*]] = icmp sgt i32 [[TMP41]], [[TMP42]]
20224 // CHECK2-NEXT:    br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
20225 // CHECK2:       cond.true14:
20226 // CHECK2-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20227 // CHECK2-NEXT:    br label [[COND_END16:%.*]]
20228 // CHECK2:       cond.false15:
20229 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20230 // CHECK2-NEXT:    br label [[COND_END16]]
20231 // CHECK2:       cond.end16:
20232 // CHECK2-NEXT:    [[COND17:%.*]] = phi i32 [ [[TMP43]], [[COND_TRUE14]] ], [ [[TMP44]], [[COND_FALSE15]] ]
20233 // CHECK2-NEXT:    store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
20234 // CHECK2-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20235 // CHECK2-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV]], align 4
20236 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
20237 // CHECK2:       omp.inner.for.end:
20238 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20239 // CHECK2:       omp.loop.exit:
20240 // CHECK2-NEXT:    [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20241 // CHECK2-NEXT:    [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
20242 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP47]])
20243 // CHECK2-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20244 // CHECK2-NEXT:    [[TMP49:%.*]] = icmp ne i32 [[TMP48]], 0
20245 // CHECK2-NEXT:    br i1 [[TMP49]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
20246 // CHECK2:       .omp.lastprivate.then:
20247 // CHECK2-NEXT:    [[TMP50:%.*]] = load i32, i32* [[CONV1]], align 8
20248 // CHECK2-NEXT:    store i32 [[TMP50]], i32* [[CONV1]], align 8
20249 // CHECK2-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
20250 // CHECK2:       .omp.lastprivate.done:
20251 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
20252 // CHECK2:       omp.precond.end:
20253 // CHECK2-NEXT:    call void @__kmpc_free_shared(i8* [[L2]], i64 4)
20254 // CHECK2-NEXT:    ret void
20255 //
20256 //
20257 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
20258 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0]] {
20259 // CHECK2-NEXT:  entry:
20260 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20261 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20262 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20263 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20264 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20265 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
20266 // CHECK2-NEXT:    [[L_ADDR:%.*]] = alloca i64, align 8
20267 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20268 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20269 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20270 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20271 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20272 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20273 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20274 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20275 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20276 // CHECK2-NEXT:    [[I6:%.*]] = alloca i32, align 4
20277 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20278 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20279 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20280 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20281 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20282 // CHECK2-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
20283 // CHECK2-NEXT:    store i64 [[L]], i64* [[L_ADDR]], align 8
20284 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20285 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
20286 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[L_ADDR]] to i32*
20287 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
20288 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
20289 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20290 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
20291 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20292 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
20293 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20294 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
20295 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20296 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
20297 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20298 // CHECK2:       omp.precond.then:
20299 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20300 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20301 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
20302 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20303 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
20304 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20305 // CHECK2-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
20306 // CHECK2-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
20307 // CHECK2-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
20308 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20309 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20310 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20311 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
20312 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
20313 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
20314 // CHECK2:       omp.dispatch.cond:
20315 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20316 // CHECK2-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20317 // CHECK2-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP10]] to i32
20318 // CHECK2-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP9]], [[CONV7]]
20319 // CHECK2-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20320 // CHECK2:       cond.true:
20321 // CHECK2-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20322 // CHECK2-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP11]] to i32
20323 // CHECK2-NEXT:    br label [[COND_END:%.*]]
20324 // CHECK2:       cond.false:
20325 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20326 // CHECK2-NEXT:    br label [[COND_END]]
20327 // CHECK2:       cond.end:
20328 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
20329 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20330 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20331 // CHECK2-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
20332 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20333 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20334 // CHECK2-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
20335 // CHECK2-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
20336 // CHECK2:       omp.dispatch.body:
20337 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20338 // CHECK2:       omp.inner.for.cond:
20339 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20340 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20341 // CHECK2-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
20342 // CHECK2-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20343 // CHECK2:       omp.inner.for.body:
20344 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20345 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
20346 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20347 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4
20348 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I6]], align 4
20349 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
20350 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
20351 // CHECK2-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
20352 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I6]], align 4
20353 // CHECK2-NEXT:    store i32 [[TMP20]], i32* [[CONV1]], align 8
20354 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20355 // CHECK2:       omp.body.continue:
20356 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20357 // CHECK2:       omp.inner.for.inc:
20358 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20359 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP21]], 1
20360 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
20361 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
20362 // CHECK2:       omp.inner.for.end:
20363 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
20364 // CHECK2:       omp.dispatch.inc:
20365 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20366 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20367 // CHECK2-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
20368 // CHECK2-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_LB]], align 4
20369 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20370 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20371 // CHECK2-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
20372 // CHECK2-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_UB]], align 4
20373 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
20374 // CHECK2:       omp.dispatch.end:
20375 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20376 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
20377 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP27]])
20378 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20379 // CHECK2-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
20380 // CHECK2-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
20381 // CHECK2:       .omp.lastprivate.then:
20382 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[CONV1]], align 8
20383 // CHECK2-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
20384 // CHECK2-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
20385 // CHECK2:       .omp.lastprivate.done:
20386 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
20387 // CHECK2:       omp.precond.end:
20388 // CHECK2-NEXT:    ret void
20389 //
20390 //
20391 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
20392 // CHECK2-SAME: (i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR2:[0-9]+]] {
20393 // CHECK2-NEXT:  entry:
20394 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20395 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
20396 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20397 // CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
20398 // CHECK2-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
20399 // CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
20400 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20401 // CHECK2-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
20402 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20403 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
20404 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
20405 // CHECK2-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
20406 // CHECK2-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
20407 // CHECK2:       user_code.entry:
20408 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
20409 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
20410 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20411 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
20412 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
20413 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
20414 // CHECK2-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [1000 x i16]* [[TMP0]]) #[[ATTR1]]
20415 // CHECK2-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
20416 // CHECK2-NEXT:    ret void
20417 // CHECK2:       worker.exit:
20418 // CHECK2-NEXT:    ret void
20419 //
20420 //
20421 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__2
20422 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
20423 // CHECK2-NEXT:  entry:
20424 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20425 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20426 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20427 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
20428 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20429 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20430 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20431 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20432 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20433 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20434 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20435 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20436 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20437 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
20438 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
20439 // CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
20440 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20441 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20442 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20443 // CHECK2-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
20444 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20445 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
20446 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
20447 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
20448 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20449 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
20450 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20451 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20452 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20453 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
20454 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20455 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
20456 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20457 // CHECK2:       omp.precond.then:
20458 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20459 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20460 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
20461 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20462 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20463 // CHECK2-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
20464 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20465 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
20466 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
20467 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20468 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20469 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
20470 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20471 // CHECK2:       cond.true:
20472 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20473 // CHECK2-NEXT:    br label [[COND_END:%.*]]
20474 // CHECK2:       cond.false:
20475 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20476 // CHECK2-NEXT:    br label [[COND_END]]
20477 // CHECK2:       cond.end:
20478 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
20479 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20480 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20481 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
20482 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20483 // CHECK2:       omp.inner.for.cond:
20484 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20485 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20486 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
20487 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
20488 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20489 // CHECK2:       omp.inner.for.body:
20490 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20491 // CHECK2-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
20492 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20493 // CHECK2-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
20494 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
20495 // CHECK2-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20496 // CHECK2-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
20497 // CHECK2-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
20498 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
20499 // CHECK2-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to i8*
20500 // CHECK2-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 8
20501 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
20502 // CHECK2-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to i8*
20503 // CHECK2-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 8
20504 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
20505 // CHECK2-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to i8*
20506 // CHECK2-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
20507 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
20508 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
20509 // CHECK2-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
20510 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20511 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
20512 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
20513 // CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP29]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP30]], i64 4)
20514 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20515 // CHECK2:       omp.inner.for.inc:
20516 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20517 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20518 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
20519 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
20520 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20521 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20522 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
20523 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
20524 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20525 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20526 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
20527 // CHECK2-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
20528 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20529 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20530 // CHECK2-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
20531 // CHECK2-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
20532 // CHECK2:       cond.true11:
20533 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20534 // CHECK2-NEXT:    br label [[COND_END13:%.*]]
20535 // CHECK2:       cond.false12:
20536 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20537 // CHECK2-NEXT:    br label [[COND_END13]]
20538 // CHECK2:       cond.end13:
20539 // CHECK2-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE11]] ], [ [[TMP40]], [[COND_FALSE12]] ]
20540 // CHECK2-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
20541 // CHECK2-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20542 // CHECK2-NEXT:    store i32 [[TMP41]], i32* [[DOTOMP_IV]], align 4
20543 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
20544 // CHECK2:       omp.inner.for.end:
20545 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20546 // CHECK2:       omp.loop.exit:
20547 // CHECK2-NEXT:    [[TMP42:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20548 // CHECK2-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP42]], align 4
20549 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP43]])
20550 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
20551 // CHECK2:       omp.precond.end:
20552 // CHECK2-NEXT:    ret void
20553 //
20554 //
20555 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__3
20556 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
20557 // CHECK2-NEXT:  entry:
20558 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20559 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20560 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20561 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20562 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20563 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 8
20564 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20565 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20566 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20567 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20568 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20569 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20570 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20571 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20572 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20573 // CHECK2-NEXT:    [[I5:%.*]] = alloca i32, align 4
20574 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20575 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20576 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20577 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20578 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20579 // CHECK2-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 8
20580 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20581 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 8
20582 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
20583 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
20584 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20585 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
20586 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20587 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20588 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20589 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
20590 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20591 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
20592 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20593 // CHECK2:       omp.precond.then:
20594 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20595 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20596 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
20597 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20598 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
20599 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20600 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
20601 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
20602 // CHECK2-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
20603 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20604 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20605 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20606 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
20607 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20608 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20609 // CHECK2-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
20610 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20611 // CHECK2:       omp.inner.for.cond:
20612 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20613 // CHECK2-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
20614 // CHECK2-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20615 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
20616 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20617 // CHECK2:       omp.inner.for.body:
20618 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20619 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
20620 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20621 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
20622 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I5]], align 4
20623 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
20624 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i64 0, i64 [[IDXPROM]]
20625 // CHECK2-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
20626 // CHECK2-NEXT:    [[CONV8:%.*]] = sext i16 [[TMP14]] to i32
20627 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 [[CONV8]], 1
20628 // CHECK2-NEXT:    [[CONV10:%.*]] = trunc i32 [[ADD9]] to i16
20629 // CHECK2-NEXT:    store i16 [[CONV10]], i16* [[ARRAYIDX]], align 2
20630 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20631 // CHECK2:       omp.body.continue:
20632 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20633 // CHECK2:       omp.inner.for.inc:
20634 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20635 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20636 // CHECK2-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
20637 // CHECK2-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
20638 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
20639 // CHECK2:       omp.inner.for.end:
20640 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20641 // CHECK2:       omp.loop.exit:
20642 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20643 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
20644 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]])
20645 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
20646 // CHECK2:       omp.precond.end:
20647 // CHECK2-NEXT:    ret void
20648 //
20649 //
20650 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
20651 // CHECK2-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
20652 // CHECK2-NEXT:  entry:
20653 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
20654 // CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
20655 // CHECK2-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
20656 // CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
20657 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20658 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20659 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
20660 // CHECK2-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
20661 // CHECK2-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
20662 // CHECK2:       user_code.entry:
20663 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
20664 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
20665 // CHECK2-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR1]]
20666 // CHECK2-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
20667 // CHECK2-NEXT:    ret void
20668 // CHECK2:       worker.exit:
20669 // CHECK2-NEXT:    ret void
20670 //
20671 //
20672 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__4
20673 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
20674 // CHECK2-NEXT:  entry:
20675 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20676 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20677 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
20678 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20679 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20680 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20681 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20682 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20683 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20684 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20685 // CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
20686 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20687 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20688 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20689 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20690 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20691 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
20692 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20693 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20694 // CHECK2-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
20695 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20696 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
20697 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
20698 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20699 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
20700 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20701 // CHECK2:       cond.true:
20702 // CHECK2-NEXT:    br label [[COND_END:%.*]]
20703 // CHECK2:       cond.false:
20704 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20705 // CHECK2-NEXT:    br label [[COND_END]]
20706 // CHECK2:       cond.end:
20707 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
20708 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20709 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20710 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
20711 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20712 // CHECK2:       omp.inner.for.cond:
20713 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20714 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
20715 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20716 // CHECK2:       omp.inner.for.body:
20717 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20718 // CHECK2-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
20719 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20720 // CHECK2-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
20721 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
20722 // CHECK2-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to i8*
20723 // CHECK2-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
20724 // CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
20725 // CHECK2-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to i8*
20726 // CHECK2-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
20727 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
20728 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
20729 // CHECK2-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
20730 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
20731 // CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP17]], i64 3)
20732 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20733 // CHECK2:       omp.inner.for.inc:
20734 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20735 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20736 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
20737 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
20738 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20739 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20740 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
20741 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
20742 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20743 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20744 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
20745 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
20746 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20747 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP24]], 9
20748 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
20749 // CHECK2:       cond.true5:
20750 // CHECK2-NEXT:    br label [[COND_END7:%.*]]
20751 // CHECK2:       cond.false6:
20752 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20753 // CHECK2-NEXT:    br label [[COND_END7]]
20754 // CHECK2:       cond.end7:
20755 // CHECK2-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP25]], [[COND_FALSE6]] ]
20756 // CHECK2-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
20757 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20758 // CHECK2-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV]], align 4
20759 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
20760 // CHECK2:       omp.inner.for.end:
20761 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20762 // CHECK2:       omp.loop.exit:
20763 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
20764 // CHECK2-NEXT:    ret void
20765 //
20766 //
20767 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__5
20768 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
20769 // CHECK2-NEXT:  entry:
20770 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20771 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20772 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20773 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20774 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
20775 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20776 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20777 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20778 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20779 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20780 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20781 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20782 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20783 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20784 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20785 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20786 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20787 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20788 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20789 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
20790 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20791 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
20792 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20793 // CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
20794 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
20795 // CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
20796 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20797 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20798 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20799 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
20800 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20801 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20802 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
20803 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20804 // CHECK2:       omp.inner.for.cond:
20805 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20806 // CHECK2-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
20807 // CHECK2-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20808 // CHECK2-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
20809 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20810 // CHECK2:       omp.inner.for.body:
20811 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20812 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
20813 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20814 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
20815 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
20816 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
20817 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
20818 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
20819 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
20820 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
20821 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20822 // CHECK2:       omp.body.continue:
20823 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20824 // CHECK2:       omp.inner.for.inc:
20825 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20826 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20827 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
20828 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
20829 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
20830 // CHECK2:       omp.inner.for.end:
20831 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20832 // CHECK2:       omp.loop.exit:
20833 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
20834 // CHECK2-NEXT:    ret void
20835 //
20836 //
20837 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
20838 // CHECK2-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
20839 // CHECK2-NEXT:  entry:
20840 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
20841 // CHECK2-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
20842 // CHECK2-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
20843 // CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
20844 // CHECK2-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
20845 // CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
20846 // CHECK2-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
20847 // CHECK2-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
20848 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
20849 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
20850 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
20851 // CHECK2-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
20852 // CHECK2-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
20853 // CHECK2:       user_code.entry:
20854 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
20855 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
20856 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[F_CASTED]] to i32*
20857 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
20858 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[F_CASTED]], align 8
20859 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
20860 // CHECK2-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i64 [[TMP4]]) #[[ATTR1]]
20861 // CHECK2-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
20862 // CHECK2-NEXT:    ret void
20863 // CHECK2:       worker.exit:
20864 // CHECK2-NEXT:    ret void
20865 //
20866 //
20867 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__6
20868 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
20869 // CHECK2-NEXT:  entry:
20870 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20871 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20872 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
20873 // CHECK2-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
20874 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20875 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20876 // CHECK2-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
20877 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20878 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20879 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20880 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20881 // CHECK2-NEXT:    [[K:%.*]] = alloca i32, align 4
20882 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20883 // CHECK2-NEXT:    [[J:%.*]] = alloca i32, align 4
20884 // CHECK2-NEXT:    [[F_CASTED:%.*]] = alloca i64, align 8
20885 // CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
20886 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20887 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20888 // CHECK2-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
20889 // CHECK2-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
20890 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
20891 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
20892 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20893 // CHECK2-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
20894 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20895 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20896 // CHECK2-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
20897 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20898 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
20899 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
20900 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20901 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
20902 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20903 // CHECK2:       cond.true:
20904 // CHECK2-NEXT:    br label [[COND_END:%.*]]
20905 // CHECK2:       cond.false:
20906 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20907 // CHECK2-NEXT:    br label [[COND_END]]
20908 // CHECK2:       cond.end:
20909 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
20910 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20911 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20912 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
20913 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20914 // CHECK2:       omp.inner.for.cond:
20915 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20916 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
20917 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20918 // CHECK2:       omp.inner.for.body:
20919 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20920 // CHECK2-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
20921 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20922 // CHECK2-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
20923 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[CONV]], align 8
20924 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[F_CASTED]] to i32*
20925 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
20926 // CHECK2-NEXT:    [[TMP12:%.*]] = load i64, i64* [[F_CASTED]], align 8
20927 // CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
20928 // CHECK2-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to i8*
20929 // CHECK2-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
20930 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
20931 // CHECK2-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to i8*
20932 // CHECK2-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
20933 // CHECK2-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
20934 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
20935 // CHECK2-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
20936 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
20937 // CHECK2-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP12]] to i8*
20938 // CHECK2-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8
20939 // CHECK2-NEXT:    [[TMP21:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
20940 // CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, [10 x [10 x i32]]*, i64)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP21]], i64 4)
20941 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20942 // CHECK2:       omp.inner.for.inc:
20943 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
20944 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20945 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
20946 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
20947 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20948 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20949 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
20950 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_LB]], align 4
20951 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20952 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20953 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
20954 // CHECK2-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_COMB_UB]], align 4
20955 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20956 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP28]], 99
20957 // CHECK2-NEXT:    br i1 [[CMP6]], label [[COND_TRUE7:%.*]], label [[COND_FALSE8:%.*]]
20958 // CHECK2:       cond.true7:
20959 // CHECK2-NEXT:    br label [[COND_END9:%.*]]
20960 // CHECK2:       cond.false8:
20961 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20962 // CHECK2-NEXT:    br label [[COND_END9]]
20963 // CHECK2:       cond.end9:
20964 // CHECK2-NEXT:    [[COND10:%.*]] = phi i32 [ 99, [[COND_TRUE7]] ], [ [[TMP29]], [[COND_FALSE8]] ]
20965 // CHECK2-NEXT:    store i32 [[COND10]], i32* [[DOTOMP_COMB_UB]], align 4
20966 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20967 // CHECK2-NEXT:    store i32 [[TMP30]], i32* [[DOTOMP_IV]], align 4
20968 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
20969 // CHECK2:       omp.inner.for.end:
20970 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20971 // CHECK2:       omp.loop.exit:
20972 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
20973 // CHECK2-NEXT:    ret void
20974 //
20975 //
20976 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__7
20977 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
20978 // CHECK2-NEXT:  entry:
20979 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20980 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20981 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
20982 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
20983 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
20984 // CHECK2-NEXT:    [[F_ADDR:%.*]] = alloca i64, align 8
20985 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20986 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20987 // CHECK2-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
20988 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20989 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20990 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20991 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20992 // CHECK2-NEXT:    [[K:%.*]] = alloca i32, align 4
20993 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
20994 // CHECK2-NEXT:    [[J:%.*]] = alloca i32, align 4
20995 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20996 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20997 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
20998 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
20999 // CHECK2-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
21000 // CHECK2-NEXT:    store i64 [[F]], i64* [[F_ADDR]], align 8
21001 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
21002 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[F_ADDR]] to i32*
21003 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21004 // CHECK2-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
21005 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21006 // CHECK2-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32
21007 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21008 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP2]] to i32
21009 // CHECK2-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
21010 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
21011 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21012 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21013 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21014 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
21015 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21016 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21017 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
21018 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21019 // CHECK2:       omp.inner.for.cond:
21020 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21021 // CHECK2-NEXT:    [[CONV4:%.*]] = sext i32 [[TMP6]] to i64
21022 // CHECK2-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21023 // CHECK2-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV4]], [[TMP7]]
21024 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21025 // CHECK2:       omp.inner.for.body:
21026 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21027 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
21028 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
21029 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21030 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
21031 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21032 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21033 // CHECK2-NEXT:    [[DIV5:%.*]] = sdiv i32 [[TMP10]], 10
21034 // CHECK2-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[DIV5]], 10
21035 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL6]]
21036 // CHECK2-NEXT:    [[MUL7:%.*]] = mul nsw i32 [[SUB]], 1
21037 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL7]]
21038 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[J]], align 4
21039 // CHECK2-NEXT:    store i32 10, i32* [[K]], align 4
21040 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
21041 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
21042 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8
21043 // CHECK2-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
21044 // CHECK2-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP11]], [[MUL9]]
21045 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
21046 // CHECK2-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD10]], [[TMP14]]
21047 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
21048 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
21049 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
21050 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
21051 // CHECK2-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP16]] to i64
21052 // CHECK2-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM12]]
21053 // CHECK2-NEXT:    store i32 [[ADD11]], i32* [[ARRAYIDX13]], align 4
21054 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21055 // CHECK2:       omp.body.continue:
21056 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21057 // CHECK2:       omp.inner.for.inc:
21058 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21059 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21060 // CHECK2-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
21061 // CHECK2-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
21062 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
21063 // CHECK2:       omp.inner.for.end:
21064 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21065 // CHECK2:       omp.loop.exit:
21066 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
21067 // CHECK2-NEXT:    ret void
21068 //
21069 //
21070 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l52
21071 // CHECK2-SAME: (i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
21072 // CHECK2-NEXT:  entry:
21073 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
21074 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
21075 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
21076 // CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
21077 // CHECK2-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
21078 // CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
21079 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
21080 // CHECK2-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
21081 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
21082 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
21083 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
21084 // CHECK2-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
21085 // CHECK2-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
21086 // CHECK2:       user_code.entry:
21087 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
21088 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
21089 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
21090 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
21091 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
21092 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
21093 // CHECK2-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR1]]
21094 // CHECK2-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
21095 // CHECK2-NEXT:    ret void
21096 // CHECK2:       worker.exit:
21097 // CHECK2-NEXT:    ret void
21098 //
21099 //
21100 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__8
21101 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
21102 // CHECK2-NEXT:  entry:
21103 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21104 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21105 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
21106 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
21107 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21108 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21109 // CHECK2-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
21110 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21111 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
21112 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
21113 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
21114 // CHECK2-NEXT:    [[J:%.*]] = alloca i32, align 4
21115 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21116 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21117 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21118 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21119 // CHECK2-NEXT:    [[I8:%.*]] = alloca i32, align 4
21120 // CHECK2-NEXT:    [[J9:%.*]] = alloca i32, align 4
21121 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
21122 // CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 8
21123 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21124 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21125 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
21126 // CHECK2-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
21127 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
21128 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
21129 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
21130 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
21131 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
21132 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
21133 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21134 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
21135 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21136 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21137 // CHECK2-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
21138 // CHECK2-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
21139 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
21140 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
21141 // CHECK2-NEXT:    store i32 [[SUB6]], i32* [[DOTCAPTURE_EXPR_3]], align 4
21142 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
21143 // CHECK2-NEXT:    store i32 0, i32* [[J]], align 4
21144 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21145 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
21146 // CHECK2-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
21147 // CHECK2:       land.lhs.true:
21148 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21149 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
21150 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
21151 // CHECK2:       omp.precond.then:
21152 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21153 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21154 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
21155 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21156 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21157 // CHECK2-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
21158 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21159 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
21160 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
21161 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21162 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21163 // CHECK2-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
21164 // CHECK2-NEXT:    br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21165 // CHECK2:       cond.true:
21166 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21167 // CHECK2-NEXT:    br label [[COND_END:%.*]]
21168 // CHECK2:       cond.false:
21169 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21170 // CHECK2-NEXT:    br label [[COND_END]]
21171 // CHECK2:       cond.end:
21172 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
21173 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21174 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21175 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
21176 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21177 // CHECK2:       omp.inner.for.cond:
21178 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21179 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21180 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
21181 // CHECK2-NEXT:    [[CMP11:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
21182 // CHECK2-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21183 // CHECK2:       omp.inner.for.body:
21184 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21185 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
21186 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21187 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
21188 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV]], align 8
21189 // CHECK2-NEXT:    [[CONV12:%.*]] = bitcast i64* [[N_CASTED]] to i32*
21190 // CHECK2-NEXT:    store i32 [[TMP21]], i32* [[CONV12]], align 4
21191 // CHECK2-NEXT:    [[TMP22:%.*]] = load i64, i64* [[N_CASTED]], align 8
21192 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
21193 // CHECK2-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to i8*
21194 // CHECK2-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
21195 // CHECK2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
21196 // CHECK2-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to i8*
21197 // CHECK2-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
21198 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
21199 // CHECK2-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP22]] to i8*
21200 // CHECK2-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
21201 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
21202 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
21203 // CHECK2-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
21204 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21205 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
21206 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
21207 // CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i64 4)
21208 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21209 // CHECK2:       omp.inner.for.inc:
21210 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21211 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21212 // CHECK2-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
21213 // CHECK2-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
21214 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21215 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21216 // CHECK2-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
21217 // CHECK2-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_COMB_LB]], align 4
21218 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21219 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21220 // CHECK2-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
21221 // CHECK2-NEXT:    store i32 [[ADD15]], i32* [[DOTOMP_COMB_UB]], align 4
21222 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21223 // CHECK2-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21224 // CHECK2-NEXT:    [[CMP16:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
21225 // CHECK2-NEXT:    br i1 [[CMP16]], label [[COND_TRUE17:%.*]], label [[COND_FALSE18:%.*]]
21226 // CHECK2:       cond.true17:
21227 // CHECK2-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21228 // CHECK2-NEXT:    br label [[COND_END19:%.*]]
21229 // CHECK2:       cond.false18:
21230 // CHECK2-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21231 // CHECK2-NEXT:    br label [[COND_END19]]
21232 // CHECK2:       cond.end19:
21233 // CHECK2-NEXT:    [[COND20:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE17]] ], [ [[TMP43]], [[COND_FALSE18]] ]
21234 // CHECK2-NEXT:    store i32 [[COND20]], i32* [[DOTOMP_COMB_UB]], align 4
21235 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21236 // CHECK2-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
21237 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
21238 // CHECK2:       omp.inner.for.end:
21239 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21240 // CHECK2:       omp.loop.exit:
21241 // CHECK2-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21242 // CHECK2-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
21243 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP46]])
21244 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
21245 // CHECK2:       omp.precond.end:
21246 // CHECK2-NEXT:    ret void
21247 //
21248 //
21249 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__9
21250 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
21251 // CHECK2-NEXT:  entry:
21252 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21253 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21254 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
21255 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
21256 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
21257 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 8
21258 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21259 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21260 // CHECK2-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
21261 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21262 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
21263 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
21264 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
21265 // CHECK2-NEXT:    [[J:%.*]] = alloca i32, align 4
21266 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21267 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21268 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21269 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21270 // CHECK2-NEXT:    [[I10:%.*]] = alloca i32, align 4
21271 // CHECK2-NEXT:    [[J11:%.*]] = alloca i32, align 4
21272 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21273 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21274 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21275 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21276 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
21277 // CHECK2-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 8
21278 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
21279 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 8
21280 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
21281 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
21282 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
21283 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
21284 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21285 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
21286 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21287 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21288 // CHECK2-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
21289 // CHECK2-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
21290 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
21291 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
21292 // CHECK2-NEXT:    store i32 [[SUB6]], i32* [[DOTCAPTURE_EXPR_3]], align 4
21293 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
21294 // CHECK2-NEXT:    store i32 0, i32* [[J]], align 4
21295 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21296 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
21297 // CHECK2-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
21298 // CHECK2:       land.lhs.true:
21299 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21300 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
21301 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
21302 // CHECK2:       omp.precond.then:
21303 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21304 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21305 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21306 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21307 // CHECK2-NEXT:    [[CONV8:%.*]] = trunc i64 [[TMP8]] to i32
21308 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21309 // CHECK2-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP9]] to i32
21310 // CHECK2-NEXT:    store i32 [[CONV8]], i32* [[DOTOMP_LB]], align 4
21311 // CHECK2-NEXT:    store i32 [[CONV9]], i32* [[DOTOMP_UB]], align 4
21312 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21313 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21314 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21315 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21316 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21317 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21318 // CHECK2-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
21319 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21320 // CHECK2:       omp.inner.for.cond:
21321 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21322 // CHECK2-NEXT:    [[CONV12:%.*]] = sext i32 [[TMP13]] to i64
21323 // CHECK2-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21324 // CHECK2-NEXT:    [[CMP13:%.*]] = icmp ule i64 [[CONV12]], [[TMP14]]
21325 // CHECK2-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21326 // CHECK2:       omp.inner.for.body:
21327 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21328 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21329 // CHECK2-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP16]], 0
21330 // CHECK2-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
21331 // CHECK2-NEXT:    [[MUL16:%.*]] = mul nsw i32 1, [[DIV15]]
21332 // CHECK2-NEXT:    [[DIV17:%.*]] = sdiv i32 [[TMP15]], [[MUL16]]
21333 // CHECK2-NEXT:    [[MUL18:%.*]] = mul nsw i32 [[DIV17]], 1
21334 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL18]]
21335 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I10]], align 4
21336 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21337 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21338 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21339 // CHECK2-NEXT:    [[SUB19:%.*]] = sub nsw i32 [[TMP19]], 0
21340 // CHECK2-NEXT:    [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1
21341 // CHECK2-NEXT:    [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]]
21342 // CHECK2-NEXT:    [[DIV22:%.*]] = sdiv i32 [[TMP18]], [[MUL21]]
21343 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21344 // CHECK2-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP20]], 0
21345 // CHECK2-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
21346 // CHECK2-NEXT:    [[MUL25:%.*]] = mul nsw i32 1, [[DIV24]]
21347 // CHECK2-NEXT:    [[MUL26:%.*]] = mul nsw i32 [[DIV22]], [[MUL25]]
21348 // CHECK2-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP17]], [[MUL26]]
21349 // CHECK2-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[SUB27]], 1
21350 // CHECK2-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
21351 // CHECK2-NEXT:    store i32 [[ADD29]], i32* [[J11]], align 4
21352 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I10]], align 4
21353 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J11]], align 4
21354 // CHECK2-NEXT:    [[ADD30:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
21355 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I10]], align 4
21356 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
21357 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
21358 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J11]], align 4
21359 // CHECK2-NEXT:    [[IDXPROM31:%.*]] = sext i32 [[TMP24]] to i64
21360 // CHECK2-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM31]]
21361 // CHECK2-NEXT:    store i32 [[ADD30]], i32* [[ARRAYIDX32]], align 4
21362 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21363 // CHECK2:       omp.body.continue:
21364 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21365 // CHECK2:       omp.inner.for.inc:
21366 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21367 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21368 // CHECK2-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
21369 // CHECK2-NEXT:    store i32 [[ADD33]], i32* [[DOTOMP_IV]], align 4
21370 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
21371 // CHECK2:       omp.inner.for.end:
21372 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21373 // CHECK2:       omp.loop.exit:
21374 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21375 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
21376 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP28]])
21377 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
21378 // CHECK2:       omp.precond.end:
21379 // CHECK2-NEXT:    ret void
21380 //
21381 //
21382 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
21383 // CHECK2-SAME: (i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
21384 // CHECK2-NEXT:  entry:
21385 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
21386 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
21387 // CHECK2-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
21388 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
21389 // CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
21390 // CHECK2-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
21391 // CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
21392 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
21393 // CHECK2-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
21394 // CHECK2-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
21395 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
21396 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
21397 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
21398 // CHECK2-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
21399 // CHECK2-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
21400 // CHECK2:       user_code.entry:
21401 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
21402 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
21403 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
21404 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
21405 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
21406 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[V_ADDR]], align 8
21407 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
21408 // CHECK2-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], [1000 x i32]* [[TMP0]], i32* [[TMP5]]) #[[ATTR1]]
21409 // CHECK2-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
21410 // CHECK2-NEXT:    ret void
21411 // CHECK2:       worker.exit:
21412 // CHECK2-NEXT:    ret void
21413 //
21414 //
21415 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__10
21416 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
21417 // CHECK2-NEXT:  entry:
21418 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21419 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21420 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
21421 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
21422 // CHECK2-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
21423 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21424 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21425 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21426 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21427 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
21428 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21429 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21430 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21431 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21432 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
21433 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
21434 // CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
21435 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21436 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21437 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
21438 // CHECK2-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
21439 // CHECK2-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
21440 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
21441 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
21442 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
21443 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
21444 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21445 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
21446 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21447 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21448 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21449 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
21450 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21451 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
21452 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21453 // CHECK2:       omp.precond.then:
21454 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21455 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21456 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
21457 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21458 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21459 // CHECK2-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
21460 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21461 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
21462 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
21463 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21464 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21465 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
21466 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21467 // CHECK2:       cond.true:
21468 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21469 // CHECK2-NEXT:    br label [[COND_END:%.*]]
21470 // CHECK2:       cond.false:
21471 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21472 // CHECK2-NEXT:    br label [[COND_END]]
21473 // CHECK2:       cond.end:
21474 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
21475 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21476 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21477 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
21478 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21479 // CHECK2:       omp.inner.for.cond:
21480 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21481 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21482 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
21483 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
21484 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21485 // CHECK2:       omp.inner.for.body:
21486 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21487 // CHECK2-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
21488 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21489 // CHECK2-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
21490 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[CONV]], align 8
21491 // CHECK2-NEXT:    [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
21492 // CHECK2-NEXT:    store i32 [[TMP18]], i32* [[CONV6]], align 4
21493 // CHECK2-NEXT:    [[TMP19:%.*]] = load i64, i64* [[N_CASTED]], align 8
21494 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[V_ADDR]], align 8
21495 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
21496 // CHECK2-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to i8*
21497 // CHECK2-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
21498 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
21499 // CHECK2-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to i8*
21500 // CHECK2-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
21501 // CHECK2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
21502 // CHECK2-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to i8*
21503 // CHECK2-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 8
21504 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
21505 // CHECK2-NEXT:    [[TMP28:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
21506 // CHECK2-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 8
21507 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
21508 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast i32* [[TMP20]] to i8*
21509 // CHECK2-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
21510 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21511 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
21512 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
21513 // CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP33]], i64 5)
21514 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21515 // CHECK2:       omp.inner.for.inc:
21516 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21517 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21518 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
21519 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
21520 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21521 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21522 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
21523 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
21524 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21525 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21526 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
21527 // CHECK2-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
21528 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21529 // CHECK2-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21530 // CHECK2-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP40]], [[TMP41]]
21531 // CHECK2-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
21532 // CHECK2:       cond.true11:
21533 // CHECK2-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21534 // CHECK2-NEXT:    br label [[COND_END13:%.*]]
21535 // CHECK2:       cond.false12:
21536 // CHECK2-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21537 // CHECK2-NEXT:    br label [[COND_END13]]
21538 // CHECK2:       cond.end13:
21539 // CHECK2-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP42]], [[COND_TRUE11]] ], [ [[TMP43]], [[COND_FALSE12]] ]
21540 // CHECK2-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
21541 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21542 // CHECK2-NEXT:    store i32 [[TMP44]], i32* [[DOTOMP_IV]], align 4
21543 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
21544 // CHECK2:       omp.inner.for.end:
21545 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21546 // CHECK2:       omp.loop.exit:
21547 // CHECK2-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21548 // CHECK2-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
21549 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP46]])
21550 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
21551 // CHECK2:       omp.precond.end:
21552 // CHECK2-NEXT:    ret void
21553 //
21554 //
21555 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__11
21556 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
21557 // CHECK2-NEXT:  entry:
21558 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21559 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21560 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
21561 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
21562 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
21563 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 8
21564 // CHECK2-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 8
21565 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21566 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21567 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21568 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21569 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
21570 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21571 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21572 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21573 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21574 // CHECK2-NEXT:    [[I5:%.*]] = alloca i32, align 4
21575 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21576 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21577 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21578 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21579 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
21580 // CHECK2-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 8
21581 // CHECK2-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 8
21582 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
21583 // CHECK2-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 8
21584 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
21585 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
21586 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21587 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
21588 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21589 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21590 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21591 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
21592 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21593 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
21594 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21595 // CHECK2:       omp.precond.then:
21596 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21597 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21598 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
21599 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
21600 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP5]] to i32
21601 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21602 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP6]] to i32
21603 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
21604 // CHECK2-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
21605 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21606 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21607 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21608 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
21609 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21610 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21611 // CHECK2-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
21612 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21613 // CHECK2:       omp.inner.for.cond:
21614 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21615 // CHECK2-NEXT:    [[CONV6:%.*]] = sext i32 [[TMP10]] to i64
21616 // CHECK2-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
21617 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp ule i64 [[CONV6]], [[TMP11]]
21618 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21619 // CHECK2:       omp.inner.for.body:
21620 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21621 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
21622 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21623 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I5]], align 4
21624 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 8
21625 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I5]], align 4
21626 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
21627 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 [[IDXPROM]]
21628 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
21629 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I5]], align 4
21630 // CHECK2-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP16]] to i64
21631 // CHECK2-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM8]]
21632 // CHECK2-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX9]], align 4
21633 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21634 // CHECK2:       omp.body.continue:
21635 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21636 // CHECK2:       omp.inner.for.inc:
21637 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21638 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21639 // CHECK2-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
21640 // CHECK2-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
21641 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
21642 // CHECK2:       omp.inner.for.end:
21643 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21644 // CHECK2:       omp.loop.exit:
21645 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21646 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
21647 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP20]])
21648 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
21649 // CHECK2:       omp.precond.end:
21650 // CHECK2-NEXT:    ret void
21651 //
21652 //
21653 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28
21654 // CHECK3-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
21655 // CHECK3-NEXT:  entry:
21656 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21657 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
21658 // CHECK3-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
21659 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
21660 // CHECK3-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
21661 // CHECK3-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
21662 // CHECK3-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
21663 // CHECK3-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
21664 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21665 // CHECK3-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
21666 // CHECK3-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
21667 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
21668 // CHECK3-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 true, i1 false, i1 false)
21669 // CHECK3-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
21670 // CHECK3-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
21671 // CHECK3:       user_code.entry:
21672 // CHECK3-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]])
21673 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
21674 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
21675 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
21676 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_ADDR]], align 4
21677 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[L_CASTED]], align 4
21678 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[L_CASTED]], align 4
21679 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
21680 // CHECK3-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [1000 x i32]* [[TMP0]], i32 [[TMP6]]) #[[ATTR1:[0-9]+]]
21681 // CHECK3-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
21682 // CHECK3-NEXT:    ret void
21683 // CHECK3:       worker.exit:
21684 // CHECK3-NEXT:    ret void
21685 //
21686 //
21687 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__
21688 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
21689 // CHECK3-NEXT:  entry:
21690 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21691 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21692 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21693 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
21694 // CHECK3-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
21695 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21696 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21697 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21698 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
21699 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
21700 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21701 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21702 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21703 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21704 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
21705 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
21706 // CHECK3-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
21707 // CHECK3-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
21708 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21709 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21710 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21711 // CHECK3-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
21712 // CHECK3-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
21713 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
21714 // CHECK3-NEXT:    [[L1:%.*]] = call i8* @__kmpc_alloc_shared(i32 4)
21715 // CHECK3-NEXT:    [[L_ON_STACK:%.*]] = bitcast i8* [[L1]] to i32*
21716 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
21717 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
21718 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21719 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
21720 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21721 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
21722 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
21723 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
21724 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21725 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
21726 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21727 // CHECK3:       omp.precond.then:
21728 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21729 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21730 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
21731 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21732 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21733 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21734 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
21735 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
21736 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21737 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21738 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
21739 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21740 // CHECK3:       cond.true:
21741 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21742 // CHECK3-NEXT:    br label [[COND_END:%.*]]
21743 // CHECK3:       cond.false:
21744 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21745 // CHECK3-NEXT:    br label [[COND_END]]
21746 // CHECK3:       cond.end:
21747 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
21748 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21749 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21750 // CHECK3-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
21751 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21752 // CHECK3:       omp.inner.for.cond:
21753 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21754 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21755 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
21756 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
21757 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21758 // CHECK3:       omp.inner.for.body:
21759 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21760 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21761 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
21762 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
21763 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
21764 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[L_ADDR]], align 4
21765 // CHECK3-NEXT:    store i32 [[TMP18]], i32* [[L_CASTED]], align 4
21766 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[L_CASTED]], align 4
21767 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
21768 // CHECK3-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP14]] to i8*
21769 // CHECK3-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
21770 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
21771 // CHECK3-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP15]] to i8*
21772 // CHECK3-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
21773 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
21774 // CHECK3-NEXT:    [[TMP25:%.*]] = inttoptr i32 [[TMP17]] to i8*
21775 // CHECK3-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
21776 // CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
21777 // CHECK3-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
21778 // CHECK3-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 4
21779 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
21780 // CHECK3-NEXT:    [[TMP29:%.*]] = inttoptr i32 [[TMP19]] to i8*
21781 // CHECK3-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 4
21782 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21783 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4
21784 // CHECK3-NEXT:    [[TMP32:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
21785 // CHECK3-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP31]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP32]], i32 5)
21786 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21787 // CHECK3:       omp.inner.for.inc:
21788 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21789 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21790 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
21791 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
21792 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21793 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21794 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
21795 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
21796 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21797 // CHECK3-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21798 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
21799 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
21800 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21801 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21802 // CHECK3-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP39]], [[TMP40]]
21803 // CHECK3-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
21804 // CHECK3:       cond.true11:
21805 // CHECK3-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21806 // CHECK3-NEXT:    br label [[COND_END13:%.*]]
21807 // CHECK3:       cond.false12:
21808 // CHECK3-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21809 // CHECK3-NEXT:    br label [[COND_END13]]
21810 // CHECK3:       cond.end13:
21811 // CHECK3-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP41]], [[COND_TRUE11]] ], [ [[TMP42]], [[COND_FALSE12]] ]
21812 // CHECK3-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
21813 // CHECK3-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21814 // CHECK3-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV]], align 4
21815 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
21816 // CHECK3:       omp.inner.for.end:
21817 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21818 // CHECK3:       omp.loop.exit:
21819 // CHECK3-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21820 // CHECK3-NEXT:    [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4
21821 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP45]])
21822 // CHECK3-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21823 // CHECK3-NEXT:    [[TMP47:%.*]] = icmp ne i32 [[TMP46]], 0
21824 // CHECK3-NEXT:    br i1 [[TMP47]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
21825 // CHECK3:       .omp.lastprivate.then:
21826 // CHECK3-NEXT:    [[TMP48:%.*]] = load i32, i32* [[L_ADDR]], align 4
21827 // CHECK3-NEXT:    store i32 [[TMP48]], i32* [[L_ADDR]], align 4
21828 // CHECK3-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
21829 // CHECK3:       .omp.lastprivate.done:
21830 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
21831 // CHECK3:       omp.precond.end:
21832 // CHECK3-NEXT:    call void @__kmpc_free_shared(i8* [[L1]], i32 4)
21833 // CHECK3-NEXT:    ret void
21834 //
21835 //
21836 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__1
21837 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
21838 // CHECK3-NEXT:  entry:
21839 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21840 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21841 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21842 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21843 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21844 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
21845 // CHECK3-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
21846 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21847 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21848 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21849 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21850 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
21851 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21852 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21853 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21854 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21855 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
21856 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21857 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21858 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21859 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21860 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21861 // CHECK3-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
21862 // CHECK3-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
21863 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
21864 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
21865 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
21866 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21867 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
21868 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21869 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21870 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21871 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
21872 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21873 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
21874 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21875 // CHECK3:       omp.precond.then:
21876 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21877 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21878 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
21879 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21880 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21881 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
21882 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
21883 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21884 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21885 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21886 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
21887 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
21888 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
21889 // CHECK3:       omp.dispatch.cond:
21890 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21891 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21892 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
21893 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21894 // CHECK3:       cond.true:
21895 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21896 // CHECK3-NEXT:    br label [[COND_END:%.*]]
21897 // CHECK3:       cond.false:
21898 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21899 // CHECK3-NEXT:    br label [[COND_END]]
21900 // CHECK3:       cond.end:
21901 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
21902 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21903 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21904 // CHECK3-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
21905 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21906 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21907 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
21908 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
21909 // CHECK3:       omp.dispatch.body:
21910 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21911 // CHECK3:       omp.inner.for.cond:
21912 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21913 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21914 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
21915 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21916 // CHECK3:       omp.inner.for.body:
21917 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21918 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
21919 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21920 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
21921 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
21922 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
21923 // CHECK3-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
21924 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
21925 // CHECK3-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
21926 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21927 // CHECK3:       omp.body.continue:
21928 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21929 // CHECK3:       omp.inner.for.inc:
21930 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
21931 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
21932 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
21933 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
21934 // CHECK3:       omp.inner.for.end:
21935 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
21936 // CHECK3:       omp.dispatch.inc:
21937 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21938 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21939 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
21940 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
21941 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21942 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
21943 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
21944 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
21945 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
21946 // CHECK3:       omp.dispatch.end:
21947 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21948 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
21949 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP27]])
21950 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21951 // CHECK3-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
21952 // CHECK3-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
21953 // CHECK3:       .omp.lastprivate.then:
21954 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
21955 // CHECK3-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
21956 // CHECK3-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
21957 // CHECK3:       .omp.lastprivate.done:
21958 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
21959 // CHECK3:       omp.precond.end:
21960 // CHECK3-NEXT:    ret void
21961 //
21962 //
21963 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
21964 // CHECK3-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR2:[0-9]+]] {
21965 // CHECK3-NEXT:  entry:
21966 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21967 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
21968 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
21969 // CHECK3-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
21970 // CHECK3-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
21971 // CHECK3-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
21972 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21973 // CHECK3-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
21974 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
21975 // CHECK3-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
21976 // CHECK3-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
21977 // CHECK3-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
21978 // CHECK3:       user_code.entry:
21979 // CHECK3-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
21980 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
21981 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
21982 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
21983 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
21984 // CHECK3-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [1000 x i16]* [[TMP0]]) #[[ATTR1]]
21985 // CHECK3-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
21986 // CHECK3-NEXT:    ret void
21987 // CHECK3:       worker.exit:
21988 // CHECK3-NEXT:    ret void
21989 //
21990 //
21991 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__2
21992 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
21993 // CHECK3-NEXT:  entry:
21994 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21995 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21996 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21997 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
21998 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21999 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22000 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22001 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22002 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22003 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22004 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22005 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22006 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22007 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
22008 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
22009 // CHECK3-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
22010 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22011 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22012 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22013 // CHECK3-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
22014 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
22015 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
22016 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
22017 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22018 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
22019 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22020 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22021 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22022 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
22023 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22024 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
22025 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22026 // CHECK3:       omp.precond.then:
22027 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22028 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22029 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
22030 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22031 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22032 // CHECK3-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
22033 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22034 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
22035 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
22036 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22037 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22038 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
22039 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22040 // CHECK3:       cond.true:
22041 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22042 // CHECK3-NEXT:    br label [[COND_END:%.*]]
22043 // CHECK3:       cond.false:
22044 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22045 // CHECK3-NEXT:    br label [[COND_END]]
22046 // CHECK3:       cond.end:
22047 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
22048 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22049 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22050 // CHECK3-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
22051 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22052 // CHECK3:       omp.inner.for.cond:
22053 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22054 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22055 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
22056 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
22057 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22058 // CHECK3:       omp.inner.for.body:
22059 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22060 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22061 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
22062 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
22063 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
22064 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
22065 // CHECK3-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
22066 // CHECK3-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
22067 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
22068 // CHECK3-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
22069 // CHECK3-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
22070 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
22071 // CHECK3-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
22072 // CHECK3-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
22073 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
22074 // CHECK3-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
22075 // CHECK3-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
22076 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22077 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
22078 // CHECK3-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
22079 // CHECK3-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
22080 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22081 // CHECK3:       omp.inner.for.inc:
22082 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22083 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22084 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
22085 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
22086 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22087 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22088 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
22089 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
22090 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22091 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22092 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
22093 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
22094 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22095 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22096 // CHECK3-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
22097 // CHECK3-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
22098 // CHECK3:       cond.true10:
22099 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22100 // CHECK3-NEXT:    br label [[COND_END12:%.*]]
22101 // CHECK3:       cond.false11:
22102 // CHECK3-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22103 // CHECK3-NEXT:    br label [[COND_END12]]
22104 // CHECK3:       cond.end12:
22105 // CHECK3-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
22106 // CHECK3-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
22107 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22108 // CHECK3-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
22109 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22110 // CHECK3:       omp.inner.for.end:
22111 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22112 // CHECK3:       omp.loop.exit:
22113 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22114 // CHECK3-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
22115 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP41]])
22116 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
22117 // CHECK3:       omp.precond.end:
22118 // CHECK3-NEXT:    ret void
22119 //
22120 //
22121 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__3
22122 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
22123 // CHECK3-NEXT:  entry:
22124 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22125 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22126 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22127 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22128 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22129 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
22130 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22131 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22132 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22133 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22134 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22135 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22136 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22137 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22138 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22139 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
22140 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22141 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22142 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22143 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22144 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22145 // CHECK3-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
22146 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
22147 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
22148 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
22149 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22150 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
22151 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22152 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22153 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22154 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
22155 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22156 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
22157 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22158 // CHECK3:       omp.precond.then:
22159 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22160 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22161 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
22162 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22163 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22164 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
22165 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
22166 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22167 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22168 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22169 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
22170 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22171 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22172 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
22173 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22174 // CHECK3:       omp.inner.for.cond:
22175 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22176 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22177 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
22178 // CHECK3-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22179 // CHECK3:       omp.inner.for.body:
22180 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22181 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
22182 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22183 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
22184 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
22185 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
22186 // CHECK3-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
22187 // CHECK3-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
22188 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
22189 // CHECK3-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
22190 // CHECK3-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
22191 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22192 // CHECK3:       omp.body.continue:
22193 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22194 // CHECK3:       omp.inner.for.inc:
22195 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22196 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22197 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
22198 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
22199 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22200 // CHECK3:       omp.inner.for.end:
22201 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22202 // CHECK3:       omp.loop.exit:
22203 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22204 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
22205 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]])
22206 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
22207 // CHECK3:       omp.precond.end:
22208 // CHECK3-NEXT:    ret void
22209 //
22210 //
22211 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
22212 // CHECK3-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22213 // CHECK3-NEXT:  entry:
22214 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22215 // CHECK3-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
22216 // CHECK3-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
22217 // CHECK3-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
22218 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22219 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22220 // CHECK3-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
22221 // CHECK3-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
22222 // CHECK3-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
22223 // CHECK3:       user_code.entry:
22224 // CHECK3-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
22225 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
22226 // CHECK3-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR1]]
22227 // CHECK3-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
22228 // CHECK3-NEXT:    ret void
22229 // CHECK3:       worker.exit:
22230 // CHECK3-NEXT:    ret void
22231 //
22232 //
22233 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__4
22234 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22235 // CHECK3-NEXT:  entry:
22236 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22237 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22238 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22239 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22240 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22241 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22242 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22243 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22244 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22245 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22246 // CHECK3-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
22247 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22248 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22249 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22250 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22251 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22252 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
22253 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22254 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22255 // CHECK3-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
22256 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22257 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
22258 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
22259 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22260 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
22261 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22262 // CHECK3:       cond.true:
22263 // CHECK3-NEXT:    br label [[COND_END:%.*]]
22264 // CHECK3:       cond.false:
22265 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22266 // CHECK3-NEXT:    br label [[COND_END]]
22267 // CHECK3:       cond.end:
22268 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
22269 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22270 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22271 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
22272 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22273 // CHECK3:       omp.inner.for.cond:
22274 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22275 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
22276 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22277 // CHECK3:       omp.inner.for.body:
22278 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22279 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22280 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
22281 // CHECK3-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
22282 // CHECK3-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
22283 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
22284 // CHECK3-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
22285 // CHECK3-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
22286 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
22287 // CHECK3-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
22288 // CHECK3-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
22289 // CHECK3-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
22290 // CHECK3-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
22291 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22292 // CHECK3:       omp.inner.for.inc:
22293 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22294 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22295 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
22296 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
22297 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22298 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22299 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
22300 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
22301 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22302 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22303 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
22304 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
22305 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22306 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
22307 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
22308 // CHECK3:       cond.true5:
22309 // CHECK3-NEXT:    br label [[COND_END7:%.*]]
22310 // CHECK3:       cond.false6:
22311 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22312 // CHECK3-NEXT:    br label [[COND_END7]]
22313 // CHECK3:       cond.end7:
22314 // CHECK3-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
22315 // CHECK3-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
22316 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22317 // CHECK3-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
22318 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22319 // CHECK3:       omp.inner.for.end:
22320 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22321 // CHECK3:       omp.loop.exit:
22322 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
22323 // CHECK3-NEXT:    ret void
22324 //
22325 //
22326 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__5
22327 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22328 // CHECK3-NEXT:  entry:
22329 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22330 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22331 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22332 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22333 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22334 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22335 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22336 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22337 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22338 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22339 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22340 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22341 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22342 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22343 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22344 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22345 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22346 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22347 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22348 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22349 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22350 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22351 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
22352 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
22353 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22354 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22355 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22356 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
22357 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22358 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22359 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
22360 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22361 // CHECK3:       omp.inner.for.cond:
22362 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22363 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22364 // CHECK3-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
22365 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22366 // CHECK3:       omp.inner.for.body:
22367 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22368 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
22369 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22370 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
22371 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
22372 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
22373 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
22374 // CHECK3-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
22375 // CHECK3-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
22376 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22377 // CHECK3:       omp.body.continue:
22378 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22379 // CHECK3:       omp.inner.for.inc:
22380 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22381 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22382 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
22383 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
22384 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22385 // CHECK3:       omp.inner.for.end:
22386 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22387 // CHECK3:       omp.loop.exit:
22388 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
22389 // CHECK3-NEXT:    ret void
22390 //
22391 //
22392 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
22393 // CHECK3-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
22394 // CHECK3-NEXT:  entry:
22395 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
22396 // CHECK3-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
22397 // CHECK3-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
22398 // CHECK3-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
22399 // CHECK3-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
22400 // CHECK3-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
22401 // CHECK3-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
22402 // CHECK3-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
22403 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
22404 // CHECK3-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
22405 // CHECK3-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
22406 // CHECK3-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
22407 // CHECK3:       user_code.entry:
22408 // CHECK3-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
22409 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_ADDR]], align 4
22410 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[F_CASTED]], align 4
22411 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[F_CASTED]], align 4
22412 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
22413 // CHECK3-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP4]]) #[[ATTR1]]
22414 // CHECK3-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
22415 // CHECK3-NEXT:    ret void
22416 // CHECK3:       worker.exit:
22417 // CHECK3-NEXT:    ret void
22418 //
22419 //
22420 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__6
22421 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
22422 // CHECK3-NEXT:  entry:
22423 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22424 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22425 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
22426 // CHECK3-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
22427 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22428 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22429 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
22430 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22431 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22432 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22433 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22434 // CHECK3-NEXT:    [[K:%.*]] = alloca i32, align 4
22435 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22436 // CHECK3-NEXT:    [[J:%.*]] = alloca i32, align 4
22437 // CHECK3-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
22438 // CHECK3-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
22439 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22440 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22441 // CHECK3-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
22442 // CHECK3-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
22443 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
22444 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22445 // CHECK3-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
22446 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22447 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22448 // CHECK3-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
22449 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22450 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
22451 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
22452 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22453 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
22454 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22455 // CHECK3:       cond.true:
22456 // CHECK3-NEXT:    br label [[COND_END:%.*]]
22457 // CHECK3:       cond.false:
22458 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22459 // CHECK3-NEXT:    br label [[COND_END]]
22460 // CHECK3:       cond.end:
22461 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
22462 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22463 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22464 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
22465 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22466 // CHECK3:       omp.inner.for.cond:
22467 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22468 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
22469 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22470 // CHECK3:       omp.inner.for.body:
22471 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22472 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22473 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
22474 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
22475 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
22476 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
22477 // CHECK3-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
22478 // CHECK3-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
22479 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
22480 // CHECK3-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
22481 // CHECK3-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
22482 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
22483 // CHECK3-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
22484 // CHECK3-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
22485 // CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
22486 // CHECK3-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
22487 // CHECK3-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
22488 // CHECK3-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
22489 // CHECK3-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
22490 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22491 // CHECK3:       omp.inner.for.inc:
22492 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22493 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22494 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
22495 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
22496 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22497 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22498 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
22499 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
22500 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22501 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22502 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
22503 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
22504 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22505 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
22506 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
22507 // CHECK3:       cond.true6:
22508 // CHECK3-NEXT:    br label [[COND_END8:%.*]]
22509 // CHECK3:       cond.false7:
22510 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22511 // CHECK3-NEXT:    br label [[COND_END8]]
22512 // CHECK3:       cond.end8:
22513 // CHECK3-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
22514 // CHECK3-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
22515 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22516 // CHECK3-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
22517 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22518 // CHECK3:       omp.inner.for.end:
22519 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22520 // CHECK3:       omp.loop.exit:
22521 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
22522 // CHECK3-NEXT:    ret void
22523 //
22524 //
22525 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__7
22526 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
22527 // CHECK3-NEXT:  entry:
22528 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22529 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22530 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22531 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22532 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
22533 // CHECK3-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
22534 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22535 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22536 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
22537 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22538 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22539 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22540 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22541 // CHECK3-NEXT:    [[K:%.*]] = alloca i32, align 4
22542 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22543 // CHECK3-NEXT:    [[J:%.*]] = alloca i32, align 4
22544 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22545 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22546 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22547 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22548 // CHECK3-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
22549 // CHECK3-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
22550 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
22551 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22552 // CHECK3-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
22553 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22554 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22555 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
22556 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
22557 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22558 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22559 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22560 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
22561 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22562 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22563 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
22564 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22565 // CHECK3:       omp.inner.for.cond:
22566 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22567 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22568 // CHECK3-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
22569 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22570 // CHECK3:       omp.inner.for.body:
22571 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22572 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
22573 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
22574 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22575 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
22576 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22577 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22578 // CHECK3-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
22579 // CHECK3-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
22580 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
22581 // CHECK3-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
22582 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
22583 // CHECK3-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
22584 // CHECK3-NEXT:    store i32 10, i32* [[K]], align 4
22585 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
22586 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
22587 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
22588 // CHECK3-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
22589 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
22590 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
22591 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
22592 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
22593 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
22594 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
22595 // CHECK3-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
22596 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
22597 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22598 // CHECK3:       omp.body.continue:
22599 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22600 // CHECK3:       omp.inner.for.inc:
22601 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22602 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22603 // CHECK3-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
22604 // CHECK3-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
22605 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22606 // CHECK3:       omp.inner.for.end:
22607 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22608 // CHECK3:       omp.loop.exit:
22609 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
22610 // CHECK3-NEXT:    ret void
22611 //
22612 //
22613 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l52
22614 // CHECK3-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
22615 // CHECK3-NEXT:  entry:
22616 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22617 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
22618 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
22619 // CHECK3-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
22620 // CHECK3-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
22621 // CHECK3-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
22622 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22623 // CHECK3-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
22624 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
22625 // CHECK3-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
22626 // CHECK3-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
22627 // CHECK3-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
22628 // CHECK3:       user_code.entry:
22629 // CHECK3-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
22630 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
22631 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
22632 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
22633 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
22634 // CHECK3-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR1]]
22635 // CHECK3-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
22636 // CHECK3-NEXT:    ret void
22637 // CHECK3:       worker.exit:
22638 // CHECK3-NEXT:    ret void
22639 //
22640 //
22641 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__8
22642 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
22643 // CHECK3-NEXT:  entry:
22644 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22645 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22646 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22647 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
22648 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
22649 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22650 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
22651 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22652 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22653 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
22654 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22655 // CHECK3-NEXT:    [[J:%.*]] = alloca i32, align 4
22656 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
22657 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
22658 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
22659 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22660 // CHECK3-NEXT:    [[I9:%.*]] = alloca i32, align 4
22661 // CHECK3-NEXT:    [[J10:%.*]] = alloca i32, align 4
22662 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
22663 // CHECK3-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
22664 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22665 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22666 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22667 // CHECK3-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
22668 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
22669 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
22670 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
22671 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
22672 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22673 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22674 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
22675 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22676 // CHECK3-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
22677 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22678 // CHECK3-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
22679 // CHECK3-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
22680 // CHECK3-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
22681 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
22682 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
22683 // CHECK3-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
22684 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
22685 // CHECK3-NEXT:    store i32 0, i32* [[J]], align 4
22686 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22687 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
22688 // CHECK3-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
22689 // CHECK3:       land.lhs.true:
22690 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22691 // CHECK3-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
22692 // CHECK3-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
22693 // CHECK3:       omp.precond.then:
22694 // CHECK3-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
22695 // CHECK3-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
22696 // CHECK3-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
22697 // CHECK3-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
22698 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22699 // CHECK3-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
22700 // CHECK3-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
22701 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22702 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
22703 // CHECK3-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
22704 // CHECK3-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
22705 // CHECK3-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
22706 // CHECK3-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
22707 // CHECK3-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22708 // CHECK3:       cond.true:
22709 // CHECK3-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
22710 // CHECK3-NEXT:    br label [[COND_END:%.*]]
22711 // CHECK3:       cond.false:
22712 // CHECK3-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
22713 // CHECK3-NEXT:    br label [[COND_END]]
22714 // CHECK3:       cond.end:
22715 // CHECK3-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
22716 // CHECK3-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
22717 // CHECK3-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
22718 // CHECK3-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
22719 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22720 // CHECK3:       omp.inner.for.cond:
22721 // CHECK3-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22722 // CHECK3-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
22723 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
22724 // CHECK3-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
22725 // CHECK3-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22726 // CHECK3:       omp.inner.for.body:
22727 // CHECK3-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
22728 // CHECK3-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
22729 // CHECK3-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
22730 // CHECK3-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
22731 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
22732 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
22733 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
22734 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
22735 // CHECK3-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
22736 // CHECK3-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
22737 // CHECK3-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
22738 // CHECK3-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
22739 // CHECK3-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
22740 // CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
22741 // CHECK3-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
22742 // CHECK3-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
22743 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
22744 // CHECK3-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
22745 // CHECK3-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
22746 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22747 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
22748 // CHECK3-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
22749 // CHECK3-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
22750 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22751 // CHECK3:       omp.inner.for.inc:
22752 // CHECK3-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22753 // CHECK3-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
22754 // CHECK3-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
22755 // CHECK3-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
22756 // CHECK3-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
22757 // CHECK3-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
22758 // CHECK3-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
22759 // CHECK3-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
22760 // CHECK3-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
22761 // CHECK3-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
22762 // CHECK3-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
22763 // CHECK3-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
22764 // CHECK3-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
22765 // CHECK3-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
22766 // CHECK3-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
22767 // CHECK3-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
22768 // CHECK3:       cond.true18:
22769 // CHECK3-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
22770 // CHECK3-NEXT:    br label [[COND_END20:%.*]]
22771 // CHECK3:       cond.false19:
22772 // CHECK3-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
22773 // CHECK3-NEXT:    br label [[COND_END20]]
22774 // CHECK3:       cond.end20:
22775 // CHECK3-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
22776 // CHECK3-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
22777 // CHECK3-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
22778 // CHECK3-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
22779 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22780 // CHECK3:       omp.inner.for.end:
22781 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22782 // CHECK3:       omp.loop.exit:
22783 // CHECK3-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22784 // CHECK3-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
22785 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP46]])
22786 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
22787 // CHECK3:       omp.precond.end:
22788 // CHECK3-NEXT:    ret void
22789 //
22790 //
22791 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__9
22792 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
22793 // CHECK3-NEXT:  entry:
22794 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22795 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22796 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22797 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22798 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22799 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
22800 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
22801 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22802 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
22803 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22804 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22805 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
22806 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22807 // CHECK3-NEXT:    [[J:%.*]] = alloca i32, align 4
22808 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
22809 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
22810 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
22811 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22812 // CHECK3-NEXT:    [[I11:%.*]] = alloca i32, align 4
22813 // CHECK3-NEXT:    [[J12:%.*]] = alloca i32, align 4
22814 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22815 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22816 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22817 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22818 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22819 // CHECK3-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
22820 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
22821 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
22822 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
22823 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
22824 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22825 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22826 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
22827 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22828 // CHECK3-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
22829 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22830 // CHECK3-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
22831 // CHECK3-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
22832 // CHECK3-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
22833 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
22834 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
22835 // CHECK3-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
22836 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
22837 // CHECK3-NEXT:    store i32 0, i32* [[J]], align 4
22838 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22839 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
22840 // CHECK3-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
22841 // CHECK3:       land.lhs.true:
22842 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22843 // CHECK3-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
22844 // CHECK3-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
22845 // CHECK3:       omp.precond.then:
22846 // CHECK3-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
22847 // CHECK3-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
22848 // CHECK3-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
22849 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22850 // CHECK3-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
22851 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22852 // CHECK3-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
22853 // CHECK3-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
22854 // CHECK3-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
22855 // CHECK3-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
22856 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22857 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22858 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
22859 // CHECK3-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
22860 // CHECK3-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
22861 // CHECK3-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
22862 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22863 // CHECK3:       omp.inner.for.cond:
22864 // CHECK3-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22865 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22866 // CHECK3-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
22867 // CHECK3-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
22868 // CHECK3-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22869 // CHECK3:       omp.inner.for.body:
22870 // CHECK3-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22871 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22872 // CHECK3-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
22873 // CHECK3-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
22874 // CHECK3-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
22875 // CHECK3-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
22876 // CHECK3-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
22877 // CHECK3-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
22878 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
22879 // CHECK3-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
22880 // CHECK3-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
22881 // CHECK3-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22882 // CHECK3-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22883 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22884 // CHECK3-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
22885 // CHECK3-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
22886 // CHECK3-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
22887 // CHECK3-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
22888 // CHECK3-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
22889 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22890 // CHECK3-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
22891 // CHECK3-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
22892 // CHECK3-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
22893 // CHECK3-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
22894 // CHECK3-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
22895 // CHECK3-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
22896 // CHECK3-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
22897 // CHECK3-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
22898 // CHECK3-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
22899 // CHECK3-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
22900 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
22901 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
22902 // CHECK3-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
22903 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
22904 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
22905 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
22906 // CHECK3-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
22907 // CHECK3-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
22908 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22909 // CHECK3:       omp.body.continue:
22910 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22911 // CHECK3:       omp.inner.for.inc:
22912 // CHECK3-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22913 // CHECK3-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
22914 // CHECK3-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
22915 // CHECK3-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
22916 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
22917 // CHECK3:       omp.inner.for.end:
22918 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22919 // CHECK3:       omp.loop.exit:
22920 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22921 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
22922 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP28]])
22923 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
22924 // CHECK3:       omp.precond.end:
22925 // CHECK3-NEXT:    ret void
22926 //
22927 //
22928 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
22929 // CHECK3-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
22930 // CHECK3-NEXT:  entry:
22931 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22932 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
22933 // CHECK3-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
22934 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
22935 // CHECK3-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
22936 // CHECK3-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
22937 // CHECK3-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
22938 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22939 // CHECK3-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
22940 // CHECK3-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
22941 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
22942 // CHECK3-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
22943 // CHECK3-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
22944 // CHECK3-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
22945 // CHECK3:       user_code.entry:
22946 // CHECK3-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
22947 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
22948 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
22949 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
22950 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[V_ADDR]], align 4
22951 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
22952 // CHECK3-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [1000 x i32]* [[TMP0]], i32* [[TMP5]]) #[[ATTR1]]
22953 // CHECK3-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
22954 // CHECK3-NEXT:    ret void
22955 // CHECK3:       worker.exit:
22956 // CHECK3-NEXT:    ret void
22957 //
22958 //
22959 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__10
22960 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
22961 // CHECK3-NEXT:  entry:
22962 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22963 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22964 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22965 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
22966 // CHECK3-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
22967 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22968 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22969 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22970 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22971 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
22972 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22973 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22974 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22975 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22976 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
22977 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
22978 // CHECK3-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
22979 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22980 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22981 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22982 // CHECK3-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
22983 // CHECK3-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
22984 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
22985 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
22986 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
22987 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22988 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
22989 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22990 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22991 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22992 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
22993 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22994 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
22995 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22996 // CHECK3:       omp.precond.then:
22997 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22998 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22999 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
23000 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23001 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23002 // CHECK3-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
23003 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23004 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
23005 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
23006 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23007 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23008 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
23009 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23010 // CHECK3:       cond.true:
23011 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23012 // CHECK3-NEXT:    br label [[COND_END:%.*]]
23013 // CHECK3:       cond.false:
23014 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23015 // CHECK3-NEXT:    br label [[COND_END]]
23016 // CHECK3:       cond.end:
23017 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
23018 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23019 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23020 // CHECK3-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
23021 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23022 // CHECK3:       omp.inner.for.cond:
23023 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23024 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23025 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
23026 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
23027 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23028 // CHECK3:       omp.inner.for.body:
23029 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23030 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23031 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
23032 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
23033 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
23034 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
23035 // CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
23036 // CHECK3-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
23037 // CHECK3-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
23038 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
23039 // CHECK3-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
23040 // CHECK3-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
23041 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
23042 // CHECK3-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
23043 // CHECK3-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
23044 // CHECK3-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
23045 // CHECK3-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
23046 // CHECK3-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
23047 // CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
23048 // CHECK3-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
23049 // CHECK3-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
23050 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23051 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
23052 // CHECK3-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
23053 // CHECK3-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
23054 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23055 // CHECK3:       omp.inner.for.inc:
23056 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23057 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23058 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
23059 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
23060 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23061 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23062 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
23063 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
23064 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23065 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23066 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
23067 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
23068 // CHECK3-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23069 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23070 // CHECK3-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
23071 // CHECK3-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
23072 // CHECK3:       cond.true10:
23073 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23074 // CHECK3-NEXT:    br label [[COND_END12:%.*]]
23075 // CHECK3:       cond.false11:
23076 // CHECK3-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23077 // CHECK3-NEXT:    br label [[COND_END12]]
23078 // CHECK3:       cond.end12:
23079 // CHECK3-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
23080 // CHECK3-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
23081 // CHECK3-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23082 // CHECK3-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
23083 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
23084 // CHECK3:       omp.inner.for.end:
23085 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23086 // CHECK3:       omp.loop.exit:
23087 // CHECK3-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23088 // CHECK3-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
23089 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP44]])
23090 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
23091 // CHECK3:       omp.precond.end:
23092 // CHECK3-NEXT:    ret void
23093 //
23094 //
23095 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__11
23096 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
23097 // CHECK3-NEXT:  entry:
23098 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23099 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23100 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23101 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23102 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23103 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
23104 // CHECK3-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
23105 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23106 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23107 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23108 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23109 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
23110 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23111 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23112 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23113 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23114 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
23115 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23116 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23117 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23118 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23119 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23120 // CHECK3-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
23121 // CHECK3-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
23122 // CHECK3-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
23123 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
23124 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
23125 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23126 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
23127 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23128 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23129 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23130 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
23131 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23132 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
23133 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23134 // CHECK3:       omp.precond.then:
23135 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23136 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23137 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
23138 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23139 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23140 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
23141 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
23142 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23143 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23144 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23145 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
23146 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23147 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23148 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
23149 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23150 // CHECK3:       omp.inner.for.cond:
23151 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23152 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23153 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
23154 // CHECK3-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23155 // CHECK3:       omp.inner.for.body:
23156 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23157 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
23158 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23159 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
23160 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
23161 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
23162 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
23163 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
23164 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
23165 // CHECK3-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
23166 // CHECK3-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
23167 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23168 // CHECK3:       omp.body.continue:
23169 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23170 // CHECK3:       omp.inner.for.inc:
23171 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23172 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23173 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
23174 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
23175 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
23176 // CHECK3:       omp.inner.for.end:
23177 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23178 // CHECK3:       omp.loop.exit:
23179 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23180 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
23181 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP20]])
23182 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
23183 // CHECK3:       omp.precond.end:
23184 // CHECK3-NEXT:    ret void
23185 //
23186 //
23187 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28
23188 // CHECK4-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
23189 // CHECK4-NEXT:  entry:
23190 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23191 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
23192 // CHECK4-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
23193 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
23194 // CHECK4-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
23195 // CHECK4-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
23196 // CHECK4-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
23197 // CHECK4-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
23198 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23199 // CHECK4-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
23200 // CHECK4-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
23201 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
23202 // CHECK4-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 true, i1 false, i1 false)
23203 // CHECK4-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
23204 // CHECK4-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
23205 // CHECK4:       user_code.entry:
23206 // CHECK4-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]])
23207 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
23208 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
23209 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
23210 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[L_ADDR]], align 4
23211 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[L_CASTED]], align 4
23212 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[L_CASTED]], align 4
23213 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
23214 // CHECK4-NEXT:    call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [1000 x i32]* [[TMP0]], i32 [[TMP6]]) #[[ATTR1:[0-9]+]]
23215 // CHECK4-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
23216 // CHECK4-NEXT:    ret void
23217 // CHECK4:       worker.exit:
23218 // CHECK4-NEXT:    ret void
23219 //
23220 //
23221 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__
23222 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
23223 // CHECK4-NEXT:  entry:
23224 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23225 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23226 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23227 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
23228 // CHECK4-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
23229 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23230 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23231 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23232 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
23233 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
23234 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23235 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23236 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23237 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23238 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
23239 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
23240 // CHECK4-NEXT:    [[L_CASTED:%.*]] = alloca i32, align 4
23241 // CHECK4-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
23242 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23243 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23244 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23245 // CHECK4-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
23246 // CHECK4-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
23247 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
23248 // CHECK4-NEXT:    [[L1:%.*]] = call i8* @__kmpc_alloc_shared(i32 4)
23249 // CHECK4-NEXT:    [[L_ON_STACK:%.*]] = bitcast i8* [[L1]] to i32*
23250 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
23251 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
23252 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23253 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
23254 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23255 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
23256 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
23257 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
23258 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23259 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
23260 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23261 // CHECK4:       omp.precond.then:
23262 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23263 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23264 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
23265 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23266 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23267 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23268 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
23269 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 128)
23270 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23271 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23272 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
23273 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23274 // CHECK4:       cond.true:
23275 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23276 // CHECK4-NEXT:    br label [[COND_END:%.*]]
23277 // CHECK4:       cond.false:
23278 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23279 // CHECK4-NEXT:    br label [[COND_END]]
23280 // CHECK4:       cond.end:
23281 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
23282 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23283 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23284 // CHECK4-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
23285 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23286 // CHECK4:       omp.inner.for.cond:
23287 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23288 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23289 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
23290 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
23291 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23292 // CHECK4:       omp.inner.for.body:
23293 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23294 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23295 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
23296 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
23297 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
23298 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[L_ADDR]], align 4
23299 // CHECK4-NEXT:    store i32 [[TMP18]], i32* [[L_CASTED]], align 4
23300 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[L_CASTED]], align 4
23301 // CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
23302 // CHECK4-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP14]] to i8*
23303 // CHECK4-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
23304 // CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
23305 // CHECK4-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP15]] to i8*
23306 // CHECK4-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
23307 // CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
23308 // CHECK4-NEXT:    [[TMP25:%.*]] = inttoptr i32 [[TMP17]] to i8*
23309 // CHECK4-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
23310 // CHECK4-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
23311 // CHECK4-NEXT:    [[TMP27:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
23312 // CHECK4-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 4
23313 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
23314 // CHECK4-NEXT:    [[TMP29:%.*]] = inttoptr i32 [[TMP19]] to i8*
23315 // CHECK4-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 4
23316 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23317 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4
23318 // CHECK4-NEXT:    [[TMP32:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
23319 // CHECK4-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP31]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP32]], i32 5)
23320 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23321 // CHECK4:       omp.inner.for.inc:
23322 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23323 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23324 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
23325 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
23326 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23327 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23328 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
23329 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
23330 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23331 // CHECK4-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23332 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
23333 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
23334 // CHECK4-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23335 // CHECK4-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23336 // CHECK4-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[TMP39]], [[TMP40]]
23337 // CHECK4-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
23338 // CHECK4:       cond.true11:
23339 // CHECK4-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
23340 // CHECK4-NEXT:    br label [[COND_END13:%.*]]
23341 // CHECK4:       cond.false12:
23342 // CHECK4-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23343 // CHECK4-NEXT:    br label [[COND_END13]]
23344 // CHECK4:       cond.end13:
23345 // CHECK4-NEXT:    [[COND14:%.*]] = phi i32 [ [[TMP41]], [[COND_TRUE11]] ], [ [[TMP42]], [[COND_FALSE12]] ]
23346 // CHECK4-NEXT:    store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
23347 // CHECK4-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23348 // CHECK4-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV]], align 4
23349 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
23350 // CHECK4:       omp.inner.for.end:
23351 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23352 // CHECK4:       omp.loop.exit:
23353 // CHECK4-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23354 // CHECK4-NEXT:    [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4
23355 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP45]])
23356 // CHECK4-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23357 // CHECK4-NEXT:    [[TMP47:%.*]] = icmp ne i32 [[TMP46]], 0
23358 // CHECK4-NEXT:    br i1 [[TMP47]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
23359 // CHECK4:       .omp.lastprivate.then:
23360 // CHECK4-NEXT:    [[TMP48:%.*]] = load i32, i32* [[L_ADDR]], align 4
23361 // CHECK4-NEXT:    store i32 [[TMP48]], i32* [[L_ADDR]], align 4
23362 // CHECK4-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
23363 // CHECK4:       .omp.lastprivate.done:
23364 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
23365 // CHECK4:       omp.precond.end:
23366 // CHECK4-NEXT:    call void @__kmpc_free_shared(i8* [[L1]], i32 4)
23367 // CHECK4-NEXT:    ret void
23368 //
23369 //
23370 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__1
23371 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0]] {
23372 // CHECK4-NEXT:  entry:
23373 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23374 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23375 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23376 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23377 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23378 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
23379 // CHECK4-NEXT:    [[L_ADDR:%.*]] = alloca i32, align 4
23380 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23381 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23382 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23383 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23384 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
23385 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23386 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23387 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23388 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23389 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
23390 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23391 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23392 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23393 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23394 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23395 // CHECK4-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
23396 // CHECK4-NEXT:    store i32 [[L]], i32* [[L_ADDR]], align 4
23397 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
23398 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
23399 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
23400 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23401 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
23402 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23403 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23404 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23405 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
23406 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23407 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
23408 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23409 // CHECK4:       omp.precond.then:
23410 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23411 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23412 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
23413 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23414 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23415 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
23416 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
23417 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23418 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23419 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23420 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
23421 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 32)
23422 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
23423 // CHECK4:       omp.dispatch.cond:
23424 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23425 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23426 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
23427 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23428 // CHECK4:       cond.true:
23429 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23430 // CHECK4-NEXT:    br label [[COND_END:%.*]]
23431 // CHECK4:       cond.false:
23432 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23433 // CHECK4-NEXT:    br label [[COND_END]]
23434 // CHECK4:       cond.end:
23435 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
23436 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23437 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23438 // CHECK4-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
23439 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23440 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23441 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
23442 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
23443 // CHECK4:       omp.dispatch.body:
23444 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23445 // CHECK4:       omp.inner.for.cond:
23446 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23447 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23448 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
23449 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23450 // CHECK4:       omp.inner.for.body:
23451 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23452 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
23453 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23454 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
23455 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[I3]], align 4
23456 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP19]]
23457 // CHECK4-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
23458 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[I3]], align 4
23459 // CHECK4-NEXT:    store i32 [[TMP20]], i32* [[L_ADDR]], align 4
23460 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23461 // CHECK4:       omp.body.continue:
23462 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23463 // CHECK4:       omp.inner.for.inc:
23464 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23465 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
23466 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
23467 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
23468 // CHECK4:       omp.inner.for.end:
23469 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
23470 // CHECK4:       omp.dispatch.inc:
23471 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23472 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23473 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
23474 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
23475 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23476 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23477 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
23478 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
23479 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
23480 // CHECK4:       omp.dispatch.end:
23481 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23482 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
23483 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP27]])
23484 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23485 // CHECK4-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
23486 // CHECK4-NEXT:    br i1 [[TMP29]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
23487 // CHECK4:       .omp.lastprivate.then:
23488 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[L_ADDR]], align 4
23489 // CHECK4-NEXT:    store i32 [[TMP30]], i32* [[L_ADDR]], align 4
23490 // CHECK4-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
23491 // CHECK4:       .omp.lastprivate.done:
23492 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
23493 // CHECK4:       omp.precond.end:
23494 // CHECK4-NEXT:    ret void
23495 //
23496 //
23497 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
23498 // CHECK4-SAME: (i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR2:[0-9]+]] {
23499 // CHECK4-NEXT:  entry:
23500 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23501 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
23502 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
23503 // CHECK4-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
23504 // CHECK4-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
23505 // CHECK4-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
23506 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23507 // CHECK4-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
23508 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
23509 // CHECK4-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
23510 // CHECK4-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
23511 // CHECK4-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
23512 // CHECK4:       user_code.entry:
23513 // CHECK4-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
23514 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
23515 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
23516 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
23517 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
23518 // CHECK4-NEXT:    call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [1000 x i16]* [[TMP0]]) #[[ATTR1]]
23519 // CHECK4-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
23520 // CHECK4-NEXT:    ret void
23521 // CHECK4:       worker.exit:
23522 // CHECK4-NEXT:    ret void
23523 //
23524 //
23525 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__2
23526 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
23527 // CHECK4-NEXT:  entry:
23528 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23529 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23530 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23531 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
23532 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23533 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23534 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23535 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23536 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
23537 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23538 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23539 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23540 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23541 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
23542 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
23543 // CHECK4-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
23544 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23545 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23546 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23547 // CHECK4-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
23548 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
23549 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
23550 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
23551 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23552 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
23553 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23554 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23555 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23556 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
23557 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23558 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
23559 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23560 // CHECK4:       omp.precond.then:
23561 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23562 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23563 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
23564 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23565 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23566 // CHECK4-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
23567 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23568 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
23569 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
23570 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23571 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23572 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
23573 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23574 // CHECK4:       cond.true:
23575 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23576 // CHECK4-NEXT:    br label [[COND_END:%.*]]
23577 // CHECK4:       cond.false:
23578 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23579 // CHECK4-NEXT:    br label [[COND_END]]
23580 // CHECK4:       cond.end:
23581 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
23582 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23583 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23584 // CHECK4-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
23585 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23586 // CHECK4:       omp.inner.for.cond:
23587 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23588 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23589 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
23590 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
23591 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23592 // CHECK4:       omp.inner.for.body:
23593 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23594 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23595 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
23596 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
23597 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
23598 // CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
23599 // CHECK4-NEXT:    [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to i8*
23600 // CHECK4-NEXT:    store i8* [[TMP19]], i8** [[TMP18]], align 4
23601 // CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
23602 // CHECK4-NEXT:    [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to i8*
23603 // CHECK4-NEXT:    store i8* [[TMP21]], i8** [[TMP20]], align 4
23604 // CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
23605 // CHECK4-NEXT:    [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to i8*
23606 // CHECK4-NEXT:    store i8* [[TMP23]], i8** [[TMP22]], align 4
23607 // CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
23608 // CHECK4-NEXT:    [[TMP25:%.*]] = bitcast [1000 x i16]* [[TMP0]] to i8*
23609 // CHECK4-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 4
23610 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23611 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
23612 // CHECK4-NEXT:    [[TMP28:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
23613 // CHECK4-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP27]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i16]*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP28]], i32 4)
23614 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23615 // CHECK4:       omp.inner.for.inc:
23616 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23617 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23618 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
23619 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
23620 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23621 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23622 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
23623 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
23624 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23625 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23626 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
23627 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
23628 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23629 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23630 // CHECK4-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
23631 // CHECK4-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
23632 // CHECK4:       cond.true10:
23633 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23634 // CHECK4-NEXT:    br label [[COND_END12:%.*]]
23635 // CHECK4:       cond.false11:
23636 // CHECK4-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23637 // CHECK4-NEXT:    br label [[COND_END12]]
23638 // CHECK4:       cond.end12:
23639 // CHECK4-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
23640 // CHECK4-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
23641 // CHECK4-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23642 // CHECK4-NEXT:    store i32 [[TMP39]], i32* [[DOTOMP_IV]], align 4
23643 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
23644 // CHECK4:       omp.inner.for.end:
23645 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23646 // CHECK4:       omp.loop.exit:
23647 // CHECK4-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23648 // CHECK4-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
23649 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP41]])
23650 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
23651 // CHECK4:       omp.precond.end:
23652 // CHECK4-NEXT:    ret void
23653 //
23654 //
23655 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__3
23656 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i16]* nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
23657 // CHECK4-NEXT:  entry:
23658 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23659 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23660 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23661 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23662 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23663 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca [1000 x i16]*, align 4
23664 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23665 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23666 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23667 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23668 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
23669 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23670 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23671 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23672 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23673 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
23674 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23675 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23676 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23677 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23678 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23679 // CHECK4-NEXT:    store [1000 x i16]* [[AA]], [1000 x i16]** [[AA_ADDR]], align 4
23680 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i16]*, [1000 x i16]** [[AA_ADDR]], align 4
23681 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
23682 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
23683 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23684 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
23685 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23686 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23687 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23688 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
23689 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23690 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
23691 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23692 // CHECK4:       omp.precond.then:
23693 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23694 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23695 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
23696 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23697 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23698 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
23699 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
23700 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23701 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23702 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23703 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
23704 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23705 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23706 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
23707 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23708 // CHECK4:       omp.inner.for.cond:
23709 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23710 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23711 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
23712 // CHECK4-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23713 // CHECK4:       omp.inner.for.body:
23714 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23715 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
23716 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23717 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
23718 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I3]], align 4
23719 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], [1000 x i16]* [[TMP0]], i32 0, i32 [[TMP13]]
23720 // CHECK4-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX]], align 2
23721 // CHECK4-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
23722 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
23723 // CHECK4-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
23724 // CHECK4-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX]], align 2
23725 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23726 // CHECK4:       omp.body.continue:
23727 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23728 // CHECK4:       omp.inner.for.inc:
23729 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23730 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23731 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
23732 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
23733 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
23734 // CHECK4:       omp.inner.for.end:
23735 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23736 // CHECK4:       omp.loop.exit:
23737 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23738 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
23739 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]])
23740 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
23741 // CHECK4:       omp.precond.end:
23742 // CHECK4-NEXT:    ret void
23743 //
23744 //
23745 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
23746 // CHECK4-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
23747 // CHECK4-NEXT:  entry:
23748 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
23749 // CHECK4-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
23750 // CHECK4-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
23751 // CHECK4-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
23752 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
23753 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
23754 // CHECK4-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
23755 // CHECK4-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
23756 // CHECK4-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
23757 // CHECK4:       user_code.entry:
23758 // CHECK4-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
23759 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
23760 // CHECK4-NEXT:    call void @__omp_outlined__4(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP0]]) #[[ATTR1]]
23761 // CHECK4-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
23762 // CHECK4-NEXT:    ret void
23763 // CHECK4:       worker.exit:
23764 // CHECK4-NEXT:    ret void
23765 //
23766 //
23767 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__4
23768 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
23769 // CHECK4-NEXT:  entry:
23770 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23771 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23772 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
23773 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23774 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23775 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23776 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23777 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23778 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23779 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
23780 // CHECK4-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
23781 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23782 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23783 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
23784 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
23785 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23786 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
23787 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23788 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23789 // CHECK4-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
23790 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23791 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
23792 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
23793 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23794 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
23795 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23796 // CHECK4:       cond.true:
23797 // CHECK4-NEXT:    br label [[COND_END:%.*]]
23798 // CHECK4:       cond.false:
23799 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23800 // CHECK4-NEXT:    br label [[COND_END]]
23801 // CHECK4:       cond.end:
23802 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
23803 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23804 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23805 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
23806 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23807 // CHECK4:       omp.inner.for.cond:
23808 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23809 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
23810 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23811 // CHECK4:       omp.inner.for.body:
23812 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23813 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23814 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
23815 // CHECK4-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to i8*
23816 // CHECK4-NEXT:    store i8* [[TMP10]], i8** [[TMP9]], align 4
23817 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
23818 // CHECK4-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to i8*
23819 // CHECK4-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
23820 // CHECK4-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
23821 // CHECK4-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
23822 // CHECK4-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
23823 // CHECK4-NEXT:    [[TMP15:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
23824 // CHECK4-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @__omp_outlined__5 to i8*), i8* null, i8** [[TMP15]], i32 3)
23825 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23826 // CHECK4:       omp.inner.for.inc:
23827 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23828 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23829 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
23830 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
23831 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23832 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23833 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
23834 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_COMB_LB]], align 4
23835 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23836 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23837 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
23838 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_UB]], align 4
23839 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23840 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
23841 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
23842 // CHECK4:       cond.true5:
23843 // CHECK4-NEXT:    br label [[COND_END7:%.*]]
23844 // CHECK4:       cond.false6:
23845 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23846 // CHECK4-NEXT:    br label [[COND_END7]]
23847 // CHECK4:       cond.end7:
23848 // CHECK4-NEXT:    [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
23849 // CHECK4-NEXT:    store i32 [[COND8]], i32* [[DOTOMP_COMB_UB]], align 4
23850 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23851 // CHECK4-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
23852 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
23853 // CHECK4:       omp.inner.for.end:
23854 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23855 // CHECK4:       omp.loop.exit:
23856 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
23857 // CHECK4-NEXT:    ret void
23858 //
23859 //
23860 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__5
23861 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
23862 // CHECK4-NEXT:  entry:
23863 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23864 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23865 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23866 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23867 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
23868 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23869 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23870 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23871 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23872 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23873 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23874 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
23875 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23876 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23877 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23878 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23879 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
23880 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
23881 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23882 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
23883 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23884 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23885 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
23886 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
23887 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23888 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23889 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23890 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
23891 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23892 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23893 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
23894 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23895 // CHECK4:       omp.inner.for.cond:
23896 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23897 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23898 // CHECK4-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
23899 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23900 // CHECK4:       omp.inner.for.body:
23901 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23902 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
23903 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23904 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
23905 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4
23906 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]]
23907 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
23908 // CHECK4-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
23909 // CHECK4-NEXT:    store i32 [[ADD1]], i32* [[ARRAYIDX]], align 4
23910 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23911 // CHECK4:       omp.body.continue:
23912 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23913 // CHECK4:       omp.inner.for.inc:
23914 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23915 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
23916 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
23917 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
23918 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
23919 // CHECK4:       omp.inner.for.end:
23920 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23921 // CHECK4:       omp.loop.exit:
23922 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
23923 // CHECK4-NEXT:    ret void
23924 //
23925 //
23926 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
23927 // CHECK4-SAME: ([10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
23928 // CHECK4-NEXT:  entry:
23929 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
23930 // CHECK4-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
23931 // CHECK4-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
23932 // CHECK4-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
23933 // CHECK4-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
23934 // CHECK4-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
23935 // CHECK4-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
23936 // CHECK4-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
23937 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
23938 // CHECK4-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
23939 // CHECK4-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
23940 // CHECK4-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
23941 // CHECK4:       user_code.entry:
23942 // CHECK4-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
23943 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[F_ADDR]], align 4
23944 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[F_CASTED]], align 4
23945 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[F_CASTED]], align 4
23946 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
23947 // CHECK4-NEXT:    call void @__omp_outlined__6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], [10 x [10 x i32]]* [[TMP0]], i32 [[TMP4]]) #[[ATTR1]]
23948 // CHECK4-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
23949 // CHECK4-NEXT:    ret void
23950 // CHECK4:       worker.exit:
23951 // CHECK4-NEXT:    ret void
23952 //
23953 //
23954 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__6
23955 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
23956 // CHECK4-NEXT:  entry:
23957 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23958 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23959 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
23960 // CHECK4-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
23961 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23962 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23963 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
23964 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23965 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23966 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23967 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23968 // CHECK4-NEXT:    [[K:%.*]] = alloca i32, align 4
23969 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
23970 // CHECK4-NEXT:    [[J:%.*]] = alloca i32, align 4
23971 // CHECK4-NEXT:    [[F_CASTED:%.*]] = alloca i32, align 4
23972 // CHECK4-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
23973 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23974 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23975 // CHECK4-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
23976 // CHECK4-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
23977 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
23978 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23979 // CHECK4-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
23980 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23981 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23982 // CHECK4-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
23983 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23984 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
23985 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
23986 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23987 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
23988 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23989 // CHECK4:       cond.true:
23990 // CHECK4-NEXT:    br label [[COND_END:%.*]]
23991 // CHECK4:       cond.false:
23992 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23993 // CHECK4-NEXT:    br label [[COND_END]]
23994 // CHECK4:       cond.end:
23995 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
23996 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23997 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23998 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
23999 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24000 // CHECK4:       omp.inner.for.cond:
24001 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24002 // CHECK4-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
24003 // CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24004 // CHECK4:       omp.inner.for.body:
24005 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24006 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24007 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[F_ADDR]], align 4
24008 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[F_CASTED]], align 4
24009 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[F_CASTED]], align 4
24010 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
24011 // CHECK4-NEXT:    [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to i8*
24012 // CHECK4-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 4
24013 // CHECK4-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
24014 // CHECK4-NEXT:    [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to i8*
24015 // CHECK4-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 4
24016 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
24017 // CHECK4-NEXT:    [[TMP16:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
24018 // CHECK4-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 4
24019 // CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
24020 // CHECK4-NEXT:    [[TMP18:%.*]] = inttoptr i32 [[TMP10]] to i8*
24021 // CHECK4-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 4
24022 // CHECK4-NEXT:    [[TMP19:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
24023 // CHECK4-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, [10 x [10 x i32]]*, i32)* @__omp_outlined__7 to i8*), i8* null, i8** [[TMP19]], i32 4)
24024 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24025 // CHECK4:       omp.inner.for.inc:
24026 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24027 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24028 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
24029 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
24030 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24031 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24032 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
24033 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_COMB_LB]], align 4
24034 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24035 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24036 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
24037 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_COMB_UB]], align 4
24038 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24039 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
24040 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
24041 // CHECK4:       cond.true6:
24042 // CHECK4-NEXT:    br label [[COND_END8:%.*]]
24043 // CHECK4:       cond.false7:
24044 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24045 // CHECK4-NEXT:    br label [[COND_END8]]
24046 // CHECK4:       cond.end8:
24047 // CHECK4-NEXT:    [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
24048 // CHECK4-NEXT:    store i32 [[COND9]], i32* [[DOTOMP_COMB_UB]], align 4
24049 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24050 // CHECK4-NEXT:    store i32 [[TMP28]], i32* [[DOTOMP_IV]], align 4
24051 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
24052 // CHECK4:       omp.inner.for.end:
24053 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24054 // CHECK4:       omp.loop.exit:
24055 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
24056 // CHECK4-NEXT:    ret void
24057 //
24058 //
24059 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__7
24060 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
24061 // CHECK4-NEXT:  entry:
24062 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24063 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24064 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24065 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24066 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
24067 // CHECK4-NEXT:    [[F_ADDR:%.*]] = alloca i32, align 4
24068 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24069 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24070 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
24071 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24072 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24073 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24074 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24075 // CHECK4-NEXT:    [[K:%.*]] = alloca i32, align 4
24076 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
24077 // CHECK4-NEXT:    [[J:%.*]] = alloca i32, align 4
24078 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24079 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24080 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24081 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24082 // CHECK4-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
24083 // CHECK4-NEXT:    store i32 [[F]], i32* [[F_ADDR]], align 4
24084 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
24085 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24086 // CHECK4-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
24087 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24088 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24089 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
24090 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
24091 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24092 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24093 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24094 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
24095 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24096 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24097 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
24098 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24099 // CHECK4:       omp.inner.for.cond:
24100 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24101 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24102 // CHECK4-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
24103 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24104 // CHECK4:       omp.inner.for.body:
24105 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24106 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
24107 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
24108 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24109 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
24110 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24111 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24112 // CHECK4-NEXT:    [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
24113 // CHECK4-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
24114 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
24115 // CHECK4-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
24116 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
24117 // CHECK4-NEXT:    store i32 [[ADD5]], i32* [[J]], align 4
24118 // CHECK4-NEXT:    store i32 10, i32* [[K]], align 4
24119 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
24120 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
24121 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[F_ADDR]], align 4
24122 // CHECK4-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
24123 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
24124 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[K]], align 4
24125 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
24126 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
24127 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP15]]
24128 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[J]], align 4
24129 // CHECK4-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP16]]
24130 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX9]], align 4
24131 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24132 // CHECK4:       omp.body.continue:
24133 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24134 // CHECK4:       omp.inner.for.inc:
24135 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24136 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24137 // CHECK4-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
24138 // CHECK4-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
24139 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
24140 // CHECK4:       omp.inner.for.end:
24141 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24142 // CHECK4:       omp.loop.exit:
24143 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
24144 // CHECK4-NEXT:    ret void
24145 //
24146 //
24147 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l52
24148 // CHECK4-SAME: (i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
24149 // CHECK4-NEXT:  entry:
24150 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24151 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
24152 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
24153 // CHECK4-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
24154 // CHECK4-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
24155 // CHECK4-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
24156 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24157 // CHECK4-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
24158 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
24159 // CHECK4-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
24160 // CHECK4-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
24161 // CHECK4-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
24162 // CHECK4:       user_code.entry:
24163 // CHECK4-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
24164 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
24165 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
24166 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
24167 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
24168 // CHECK4-NEXT:    call void @__omp_outlined__8(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [10 x [10 x i32]]* [[TMP0]]) #[[ATTR1]]
24169 // CHECK4-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
24170 // CHECK4-NEXT:    ret void
24171 // CHECK4:       worker.exit:
24172 // CHECK4-NEXT:    ret void
24173 //
24174 //
24175 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__8
24176 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
24177 // CHECK4-NEXT:  entry:
24178 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24179 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24180 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24181 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
24182 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
24183 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24184 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
24185 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24186 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24187 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
24188 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
24189 // CHECK4-NEXT:    [[J:%.*]] = alloca i32, align 4
24190 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
24191 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
24192 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
24193 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24194 // CHECK4-NEXT:    [[I9:%.*]] = alloca i32, align 4
24195 // CHECK4-NEXT:    [[J10:%.*]] = alloca i32, align 4
24196 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
24197 // CHECK4-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x i8*], align 4
24198 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24199 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24200 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24201 // CHECK4-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
24202 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
24203 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
24204 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
24205 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
24206 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24207 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24208 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
24209 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24210 // CHECK4-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
24211 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24212 // CHECK4-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
24213 // CHECK4-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
24214 // CHECK4-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
24215 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
24216 // CHECK4-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
24217 // CHECK4-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
24218 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
24219 // CHECK4-NEXT:    store i32 0, i32* [[J]], align 4
24220 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24221 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
24222 // CHECK4-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
24223 // CHECK4:       land.lhs.true:
24224 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24225 // CHECK4-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
24226 // CHECK4-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
24227 // CHECK4:       omp.precond.then:
24228 // CHECK4-NEXT:    store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
24229 // CHECK4-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
24230 // CHECK4-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_COMB_UB]], align 8
24231 // CHECK4-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
24232 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24233 // CHECK4-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
24234 // CHECK4-NEXT:    [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
24235 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24236 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
24237 // CHECK4-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
24238 // CHECK4-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
24239 // CHECK4-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
24240 // CHECK4-NEXT:    [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
24241 // CHECK4-NEXT:    br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24242 // CHECK4:       cond.true:
24243 // CHECK4-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
24244 // CHECK4-NEXT:    br label [[COND_END:%.*]]
24245 // CHECK4:       cond.false:
24246 // CHECK4-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
24247 // CHECK4-NEXT:    br label [[COND_END]]
24248 // CHECK4:       cond.end:
24249 // CHECK4-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
24250 // CHECK4-NEXT:    store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
24251 // CHECK4-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
24252 // CHECK4-NEXT:    store i64 [[TMP14]], i64* [[DOTOMP_IV]], align 8
24253 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24254 // CHECK4:       omp.inner.for.cond:
24255 // CHECK4-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
24256 // CHECK4-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
24257 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
24258 // CHECK4-NEXT:    [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
24259 // CHECK4-NEXT:    br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24260 // CHECK4:       omp.inner.for.body:
24261 // CHECK4-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
24262 // CHECK4-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
24263 // CHECK4-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
24264 // CHECK4-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
24265 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[N_ADDR]], align 4
24266 // CHECK4-NEXT:    store i32 [[TMP21]], i32* [[N_CASTED]], align 4
24267 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[N_CASTED]], align 4
24268 // CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
24269 // CHECK4-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to i8*
24270 // CHECK4-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
24271 // CHECK4-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
24272 // CHECK4-NEXT:    [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to i8*
24273 // CHECK4-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
24274 // CHECK4-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
24275 // CHECK4-NEXT:    [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to i8*
24276 // CHECK4-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
24277 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
24278 // CHECK4-NEXT:    [[TMP30:%.*]] = bitcast [10 x [10 x i32]]* [[TMP0]] to i8*
24279 // CHECK4-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 4
24280 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24281 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
24282 // CHECK4-NEXT:    [[TMP33:%.*]] = bitcast [4 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
24283 // CHECK4-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP32]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [10 x [10 x i32]]*)* @__omp_outlined__9 to i8*), i8* null, i8** [[TMP33]], i32 4)
24284 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24285 // CHECK4:       omp.inner.for.inc:
24286 // CHECK4-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
24287 // CHECK4-NEXT:    [[TMP35:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
24288 // CHECK4-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
24289 // CHECK4-NEXT:    store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
24290 // CHECK4-NEXT:    [[TMP36:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
24291 // CHECK4-NEXT:    [[TMP37:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
24292 // CHECK4-NEXT:    [[ADD15:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
24293 // CHECK4-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_COMB_LB]], align 8
24294 // CHECK4-NEXT:    [[TMP38:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
24295 // CHECK4-NEXT:    [[TMP39:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
24296 // CHECK4-NEXT:    [[ADD16:%.*]] = add nsw i64 [[TMP38]], [[TMP39]]
24297 // CHECK4-NEXT:    store i64 [[ADD16]], i64* [[DOTOMP_COMB_UB]], align 8
24298 // CHECK4-NEXT:    [[TMP40:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
24299 // CHECK4-NEXT:    [[TMP41:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
24300 // CHECK4-NEXT:    [[CMP17:%.*]] = icmp sgt i64 [[TMP40]], [[TMP41]]
24301 // CHECK4-NEXT:    br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
24302 // CHECK4:       cond.true18:
24303 // CHECK4-NEXT:    [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
24304 // CHECK4-NEXT:    br label [[COND_END20:%.*]]
24305 // CHECK4:       cond.false19:
24306 // CHECK4-NEXT:    [[TMP43:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
24307 // CHECK4-NEXT:    br label [[COND_END20]]
24308 // CHECK4:       cond.end20:
24309 // CHECK4-NEXT:    [[COND21:%.*]] = phi i64 [ [[TMP42]], [[COND_TRUE18]] ], [ [[TMP43]], [[COND_FALSE19]] ]
24310 // CHECK4-NEXT:    store i64 [[COND21]], i64* [[DOTOMP_COMB_UB]], align 8
24311 // CHECK4-NEXT:    [[TMP44:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
24312 // CHECK4-NEXT:    store i64 [[TMP44]], i64* [[DOTOMP_IV]], align 8
24313 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
24314 // CHECK4:       omp.inner.for.end:
24315 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24316 // CHECK4:       omp.loop.exit:
24317 // CHECK4-NEXT:    [[TMP45:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24318 // CHECK4-NEXT:    [[TMP46:%.*]] = load i32, i32* [[TMP45]], align 4
24319 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP46]])
24320 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
24321 // CHECK4:       omp.precond.end:
24322 // CHECK4-NEXT:    ret void
24323 //
24324 //
24325 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__9
24326 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [10 x [10 x i32]]* nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR0]] {
24327 // CHECK4-NEXT:  entry:
24328 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24329 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24330 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24331 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24332 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24333 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [10 x [10 x i32]]*, align 4
24334 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
24335 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24336 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
24337 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24338 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24339 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
24340 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
24341 // CHECK4-NEXT:    [[J:%.*]] = alloca i32, align 4
24342 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
24343 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
24344 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
24345 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24346 // CHECK4-NEXT:    [[I11:%.*]] = alloca i32, align 4
24347 // CHECK4-NEXT:    [[J12:%.*]] = alloca i32, align 4
24348 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24349 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24350 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24351 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24352 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24353 // CHECK4-NEXT:    store [10 x [10 x i32]]* [[C]], [10 x [10 x i32]]** [[C_ADDR]], align 4
24354 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x [10 x i32]]*, [10 x [10 x i32]]** [[C_ADDR]], align 4
24355 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
24356 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
24357 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
24358 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24359 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24360 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
24361 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24362 // CHECK4-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
24363 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24364 // CHECK4-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
24365 // CHECK4-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
24366 // CHECK4-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
24367 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
24368 // CHECK4-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
24369 // CHECK4-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
24370 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
24371 // CHECK4-NEXT:    store i32 0, i32* [[J]], align 4
24372 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24373 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
24374 // CHECK4-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
24375 // CHECK4:       land.lhs.true:
24376 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24377 // CHECK4-NEXT:    [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
24378 // CHECK4-NEXT:    br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
24379 // CHECK4:       omp.precond.then:
24380 // CHECK4-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
24381 // CHECK4-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
24382 // CHECK4-NEXT:    store i64 [[TMP7]], i64* [[DOTOMP_UB]], align 8
24383 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24384 // CHECK4-NEXT:    [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
24385 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24386 // CHECK4-NEXT:    [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
24387 // CHECK4-NEXT:    store i64 [[CONV9]], i64* [[DOTOMP_LB]], align 8
24388 // CHECK4-NEXT:    store i64 [[CONV10]], i64* [[DOTOMP_UB]], align 8
24389 // CHECK4-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
24390 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24391 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24392 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
24393 // CHECK4-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
24394 // CHECK4-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
24395 // CHECK4-NEXT:    store i64 [[TMP12]], i64* [[DOTOMP_IV]], align 8
24396 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24397 // CHECK4:       omp.inner.for.cond:
24398 // CHECK4-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
24399 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24400 // CHECK4-NEXT:    [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
24401 // CHECK4-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
24402 // CHECK4-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24403 // CHECK4:       omp.inner.for.body:
24404 // CHECK4-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
24405 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24406 // CHECK4-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
24407 // CHECK4-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
24408 // CHECK4-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
24409 // CHECK4-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
24410 // CHECK4-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
24411 // CHECK4-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
24412 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
24413 // CHECK4-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
24414 // CHECK4-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
24415 // CHECK4-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
24416 // CHECK4-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
24417 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24418 // CHECK4-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
24419 // CHECK4-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
24420 // CHECK4-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
24421 // CHECK4-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
24422 // CHECK4-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
24423 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24424 // CHECK4-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
24425 // CHECK4-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
24426 // CHECK4-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
24427 // CHECK4-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
24428 // CHECK4-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
24429 // CHECK4-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
24430 // CHECK4-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
24431 // CHECK4-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
24432 // CHECK4-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
24433 // CHECK4-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
24434 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I11]], align 4
24435 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[J12]], align 4
24436 // CHECK4-NEXT:    [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
24437 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I11]], align 4
24438 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[TMP0]], i32 0, i32 [[TMP23]]
24439 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[J12]], align 4
24440 // CHECK4-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP24]]
24441 // CHECK4-NEXT:    store i32 [[ADD36]], i32* [[ARRAYIDX37]], align 4
24442 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24443 // CHECK4:       omp.body.continue:
24444 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24445 // CHECK4:       omp.inner.for.inc:
24446 // CHECK4-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
24447 // CHECK4-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
24448 // CHECK4-NEXT:    [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
24449 // CHECK4-NEXT:    store i64 [[ADD38]], i64* [[DOTOMP_IV]], align 8
24450 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
24451 // CHECK4:       omp.inner.for.end:
24452 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24453 // CHECK4:       omp.loop.exit:
24454 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24455 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
24456 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP28]])
24457 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
24458 // CHECK4:       omp.precond.end:
24459 // CHECK4-NEXT:    ret void
24460 //
24461 //
24462 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l59
24463 // CHECK4-SAME: (i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
24464 // CHECK4-NEXT:  entry:
24465 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24466 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
24467 // CHECK4-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
24468 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
24469 // CHECK4-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
24470 // CHECK4-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
24471 // CHECK4-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
24472 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24473 // CHECK4-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
24474 // CHECK4-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
24475 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
24476 // CHECK4-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 true, i1 false, i1 false)
24477 // CHECK4-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
24478 // CHECK4-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
24479 // CHECK4:       user_code.entry:
24480 // CHECK4-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4]])
24481 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
24482 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
24483 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
24484 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[V_ADDR]], align 4
24485 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
24486 // CHECK4-NEXT:    call void @__omp_outlined__10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], [1000 x i32]* [[TMP0]], i32* [[TMP5]]) #[[ATTR1]]
24487 // CHECK4-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 true, i1 false)
24488 // CHECK4-NEXT:    ret void
24489 // CHECK4:       worker.exit:
24490 // CHECK4-NEXT:    ret void
24491 //
24492 //
24493 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__10
24494 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
24495 // CHECK4-NEXT:  entry:
24496 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24497 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24498 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24499 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
24500 // CHECK4-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
24501 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24502 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24503 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24504 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24505 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
24506 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24507 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24508 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24509 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24510 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
24511 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
24512 // CHECK4-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
24513 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24514 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24515 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24516 // CHECK4-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
24517 // CHECK4-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
24518 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
24519 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
24520 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
24521 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24522 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
24523 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24524 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24525 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24526 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
24527 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24528 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
24529 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24530 // CHECK4:       omp.precond.then:
24531 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24532 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24533 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
24534 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24535 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24536 // CHECK4-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
24537 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24538 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
24539 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
24540 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24541 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24542 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
24543 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24544 // CHECK4:       cond.true:
24545 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24546 // CHECK4-NEXT:    br label [[COND_END:%.*]]
24547 // CHECK4:       cond.false:
24548 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24549 // CHECK4-NEXT:    br label [[COND_END]]
24550 // CHECK4:       cond.end:
24551 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
24552 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24553 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24554 // CHECK4-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
24555 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24556 // CHECK4:       omp.inner.for.cond:
24557 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24558 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24559 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
24560 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
24561 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24562 // CHECK4:       omp.inner.for.body:
24563 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24564 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24565 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[N_ADDR]], align 4
24566 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[N_CASTED]], align 4
24567 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[N_CASTED]], align 4
24568 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[V_ADDR]], align 4
24569 // CHECK4-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
24570 // CHECK4-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to i8*
24571 // CHECK4-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 4
24572 // CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
24573 // CHECK4-NEXT:    [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to i8*
24574 // CHECK4-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 4
24575 // CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
24576 // CHECK4-NEXT:    [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to i8*
24577 // CHECK4-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 4
24578 // CHECK4-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
24579 // CHECK4-NEXT:    [[TMP26:%.*]] = bitcast [1000 x i32]* [[TMP0]] to i8*
24580 // CHECK4-NEXT:    store i8* [[TMP26]], i8** [[TMP25]], align 4
24581 // CHECK4-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
24582 // CHECK4-NEXT:    [[TMP28:%.*]] = bitcast i32* [[TMP18]] to i8*
24583 // CHECK4-NEXT:    store i8* [[TMP28]], i8** [[TMP27]], align 4
24584 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24585 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
24586 // CHECK4-NEXT:    [[TMP31:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
24587 // CHECK4-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, [1000 x i32]*, i32*)* @__omp_outlined__11 to i8*), i8* null, i8** [[TMP31]], i32 5)
24588 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24589 // CHECK4:       omp.inner.for.inc:
24590 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24591 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24592 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
24593 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
24594 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24595 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24596 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
24597 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4
24598 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24599 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24600 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
24601 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4
24602 // CHECK4-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24603 // CHECK4-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24604 // CHECK4-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
24605 // CHECK4-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
24606 // CHECK4:       cond.true10:
24607 // CHECK4-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24608 // CHECK4-NEXT:    br label [[COND_END12:%.*]]
24609 // CHECK4:       cond.false11:
24610 // CHECK4-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24611 // CHECK4-NEXT:    br label [[COND_END12]]
24612 // CHECK4:       cond.end12:
24613 // CHECK4-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
24614 // CHECK4-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4
24615 // CHECK4-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24616 // CHECK4-NEXT:    store i32 [[TMP42]], i32* [[DOTOMP_IV]], align 4
24617 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
24618 // CHECK4:       omp.inner.for.end:
24619 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24620 // CHECK4:       omp.loop.exit:
24621 // CHECK4-NEXT:    [[TMP43:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24622 // CHECK4-NEXT:    [[TMP44:%.*]] = load i32, i32* [[TMP43]], align 4
24623 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP44]])
24624 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
24625 // CHECK4:       omp.precond.end:
24626 // CHECK4-NEXT:    ret void
24627 //
24628 //
24629 // CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__11
24630 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], [1000 x i32]* nonnull align 4 dereferenceable(4000) [[A:%.*]], i32* [[V:%.*]]) #[[ATTR0]] {
24631 // CHECK4-NEXT:  entry:
24632 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24633 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24634 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24635 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24636 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24637 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca [1000 x i32]*, align 4
24638 // CHECK4-NEXT:    [[V_ADDR:%.*]] = alloca i32*, align 4
24639 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24640 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24641 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24642 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24643 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
24644 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24645 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24646 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24647 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24648 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
24649 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24650 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24651 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24652 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24653 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24654 // CHECK4-NEXT:    store [1000 x i32]* [[A]], [1000 x i32]** [[A_ADDR]], align 4
24655 // CHECK4-NEXT:    store i32* [[V]], i32** [[V_ADDR]], align 4
24656 // CHECK4-NEXT:    [[TMP0:%.*]] = load [1000 x i32]*, [1000 x i32]** [[A_ADDR]], align 4
24657 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
24658 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
24659 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24660 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
24661 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24662 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24663 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24664 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
24665 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24666 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
24667 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24668 // CHECK4:       omp.precond.then:
24669 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24670 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24671 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
24672 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24673 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24674 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
24675 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
24676 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24677 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24678 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24679 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
24680 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP8]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24681 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24682 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
24683 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24684 // CHECK4:       omp.inner.for.cond:
24685 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24686 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24687 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
24688 // CHECK4-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24689 // CHECK4:       omp.inner.for.body:
24690 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24691 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
24692 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24693 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4
24694 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[V_ADDR]], align 4
24695 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[I3]], align 4
24696 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i32 [[TMP14]]
24697 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
24698 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I3]], align 4
24699 // CHECK4-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
24700 // CHECK4-NEXT:    store i32 [[TMP15]], i32* [[ARRAYIDX5]], align 4
24701 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24702 // CHECK4:       omp.body.continue:
24703 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24704 // CHECK4:       omp.inner.for.inc:
24705 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24706 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24707 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
24708 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
24709 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
24710 // CHECK4:       omp.inner.for.end:
24711 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24712 // CHECK4:       omp.loop.exit:
24713 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24714 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
24715 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP20]])
24716 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
24717 // CHECK4:       omp.precond.end:
24718 // CHECK4-NEXT:    ret void
24719 //
24720