1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test host code gen
3 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
4 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
5 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
6 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
9 
10 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
11 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
12 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
13 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
14 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
15 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
16 
17 // RUN: %clang_cc1  -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
18 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
19 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
20 // RUN: %clang_cc1  -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
21 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
22 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
23 
24 // RUN: %clang_cc1  -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
25 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
26 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
27 // RUN: %clang_cc1  -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
28 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
29 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16
30 // expected-no-diagnostics
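// The RUN lines above exercise the same source in several configurations: with and
// without -DLAMBDA (lambda vs. plain host code), for a 64-bit (powerpc64le) and a
// 32-bit (i386) host, compiling directly and through a PCH, and with -fopenmp vs.
// -fopenmp-simd (the simd-only mode is not expected to emit offloading or OpenMP
// runtime calls, so those prefixes mostly check the plain simd loops).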
31 #ifndef HEADER
32 #define HEADER
33 
34 
35 template <typename T>
36 T tmain() {
37   T *a, *b, *c;
38   int n = 10000;
39   int ch = 100;
40 
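  // The target regions below only vary in the dist_schedule/schedule clauses used on
  // the combined 'distribute parallel for simd' directive; host codegen for each
  // variant is verified by the autogenerated CHECK lines at the end of the file.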
41   // no schedule clauses
42   #pragma omp target
43   #pragma omp teams
44   #pragma omp distribute parallel for simd
45   for (int i = 0; i < n; ++i) {
46     a[i] = b[i] + c[i];
47   }
48 
49   // dist_schedule: static no chunk
50   #pragma omp target
51   #pragma omp teams
52   #pragma omp distribute parallel for simd dist_schedule(static)
53   for (int i = 0; i < n; ++i) {
54     a[i] = b[i] + c[i];
55   }
56 
57   // dist_schedule: static chunk
58   #pragma omp target
59   #pragma omp teams
60   #pragma omp distribute parallel for simd dist_schedule(static, ch)
61   for (int i = 0; i < n; ++i) {
62     a[i] = b[i] + c[i];
63   }
64 
65   // schedule: static no chunk
66   #pragma omp target
67   #pragma omp teams
68   #pragma omp distribute parallel for simd schedule(static)
69   for (int i = 0; i < n; ++i) {
70     a[i] = b[i] + c[i];
71   }
72 
73   // schedule: static chunk
74   #pragma omp target
75   #pragma omp teams
76   #pragma omp distribute parallel for simd schedule(static, ch)
77   for (int i = 0; i < n; ++i) {
78     a[i] = b[i] + c[i];
79   }
80 
81   // schedule: dynamic no chunk
82   #pragma omp target
83   #pragma omp teams
84   #pragma omp distribute parallel for simd schedule(dynamic)
85   for (int i = 0; i < n; ++i) {
86     a[i] = b[i] + c[i];
87   }
88 
89   // schedule: dynamic chunk
90   #pragma omp target
91   #pragma omp teams
92   #pragma omp distribute parallel for simd schedule(dynamic, ch)
93   for (int i = 0; i < n; ++i) {
94     a[i] = b[i] + c[i];
95   }
96 
97   return T();
98 }
99 
100 int main() {
101   double *a, *b, *c;
102   int n = 10000;
103   int ch = 100;
104 
105 #ifdef LAMBDA
106   [&]() {
107 
108 
109 
110 
111 
112 
113 
114 
115     // no schedule clauses
116     #pragma omp target
117     #pragma omp teams
118 
119     #pragma omp distribute parallel for simd
120     for (int i = 0; i < n; ++i) {
121       a[i] = b[i] + c[i];
122 
123 
124       // check EUB for distribute
125 
126       // initialize omp.iv
127 
128       // check exit condition
129 
130       // check that PrevLB and PrevUB are passed to the 'for'
131       // check that distlb and distub are properly passed to fork_call
132 
133       // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
134 
135 
136       // implementation of 'parallel for'
137 
138 
139       // initialize lb and ub to PrevLB and PrevUB
140 
141       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
142       // In this case we use EUB
143 
144       // initialize omp.iv
145 
146       // check exit condition
147 
148       // check that PrevLB and PrevUB are passed to the 'for'
149 
150       // check stride 1 for 'for' in 'distribute parallel for simd'
151 
152 
153       [&]() {
154         a[i] = b[i] + c[i];
155       }();
156     }
157 
158     // dist_schedule: static no chunk (same as default - no dist_schedule)
159     #pragma omp target
160     #pragma omp teams
161 
162     #pragma omp distribute parallel for simd dist_schedule(static)
163     for (int i = 0; i < n; ++i) {
164       a[i] = b[i] + c[i];
165 
166 
167       // check EUB for distribute
168 
169       // initialize omp.iv
170 
171       // check exit condition
172 
173       // check that PrevLB and PrevUB are passed to the 'for'
174       // check that distlb and distub are properly passed to fork_call
175 
176       // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
177 
178 
179       // implementation of 'parallel for'
180 
181 
182       // initialize lb and ub to PrevLB and PrevUB
183 
184       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
185       // In this case we use EUB
186 
187       // initialize omp.iv
188 
189       // check exit condition
190 
191       // check that PrevLB and PrevUB are passed to the 'for'
192 
193       // check stride 1 for 'for' in 'distribute parallel for simd'
194 
195       [&]() {
196         a[i] = b[i] + c[i];
197       }();
198     }
199 
200     // dist_schedule: static chunk
201     #pragma omp target
202     #pragma omp teams
203 
204     #pragma omp distribute parallel for simd dist_schedule(static, ch)
205     for (int i = 0; i < n; ++i) {
206       a[i] = b[i] + c[i];
207 
208 
209       // check EUB for distribute
210 
211       // initialize omp.iv
212 
213       // check exit condition
214 
215       // check that PrevLB and PrevUB are passed to the 'for'
216       // check that distlb and distub are properly passed to fork_call
217 
218       // check DistInc
219 
220       // Update UB
221 
222       // Store LB in IV
223 
224 
225       // loop exit
226 
227       // skip implementation of 'parallel for': it uses the default schedule, which is tested above
228       [&]() {
229         a[i] = b[i] + c[i];
230       }();
231     }
232 
233     // schedule: static no chunk
234     #pragma omp target
235     #pragma omp teams
236 
237     #pragma omp distribute parallel for simd schedule(static)
238     for (int i = 0; i < n; ++i) {
239       a[i] = b[i] + c[i];
240 
241       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
242 
243       // 'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default)
244 
245 
246       // initialize lb and ub to PrevLB and PrevUB
247 
248       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
249       // In this case we use EUB
250 
251       // initialize omp.iv
252 
253       // check exit condition
254 
255       // check that PrevLB and PrevUB are passed to the 'for'
256 
257       // check stride 1 for 'for' in 'distribute parallel for simd'
258 
259 
260       [&]() {
261         a[i] = b[i] + c[i];
262       }();
263     }
264 
265     // schedule: static chunk
266     #pragma omp target
267     #pragma omp teams
268 
269     #pragma omp distribute parallel for simd schedule(static, ch)
270     for (int i = 0; i < n; ++i) {
271       a[i] = b[i] + c[i];
272       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
273 
274       // 'parallel for' implementation using outer and inner loops and PrevEUB
275 
276       // initialize lb and ub to PrevLB and PrevUB
277 
278       // check PrevEUB (using PrevUB instead of NumIt as upper bound)
279 
280       // initialize omp.iv (IV = LB)
281 
282       // outer loop: while (IV < UB) {
283 
284 
285 
286       // skip body branch
287 
288       // IV = IV + 1 and inner loop latch
289 
290       // check NextLB and NextUB
291 
292 
293       [&]() {
294         a[i] = b[i] + c[i];
295       }();
296     }
297 
298     // schedule: dynamic no chunk
299     #pragma omp target
300     #pragma omp teams
301 
302     #pragma omp distribute parallel for simd schedule(dynamic)
303     for (int i = 0; i < n; ++i) {
304       a[i] = b[i] + c[i];
305       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
306 
307       // 'parallel for' implementation using outer and inner loops and PrevEUB
308 
309       // initialize lb and ub to PrevLB and PrevUB
310 
311 
312       // initialize omp.iv (IV = LB)
313 
314 
315       // skip body branch
316 
317       // IV = IV + 1 and inner loop latch
318 
319       // check NextLB and NextUB
320 
321 
322       [&]() {
323         a[i] = b[i] + c[i];
324       }();
325     }
326 
327     // schedule: dynamic chunk
328     #pragma omp target
329     #pragma omp teams
330 
331     #pragma omp distribute parallel for simd schedule(dynamic, ch)
332     for (int i = 0; i < n; ++i) {
333       a[i] = b[i] + c[i];
334       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
335 
336       // 'parallel for' implementation using outer and inner loops and PrevEUB
337 
338       // initialize lb and ub to PrevLB and PrevUB
339 
340 
341       // initialize omp.iv (IV = LB)
342 
343 
344       // skip body branch
345 
346       // IV = IV + 1 and inner loop latch
347 
348       // check NextLB and NextUB
349 
350 
351       [&]() {
352         a[i] = b[i] + c[i];
353       }();
354     }
355   }();
356   return 0;
357 #else
358 
359 
360 
361 
362 
363 
364 
365 
366 
367   // no schedule clauses
368   #pragma omp target
369   #pragma omp teams
370 
371   #pragma omp distribute parallel for simd
372   for (int i = 0; i < n; ++i) {
373     a[i] = b[i] + c[i];
374 
375 
376     // check EUB for distribute
377 
378     // initialize omp.iv
379 
380     // check exit condition
381 
382     // check that PrevLB and PrevUB are passed to the 'for'
383     // check that distlb and distub are properly passed to fork_call
384 
385     // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
386 
387 
388     // implementation of 'parallel for'
389 
390 
391     // initialize lb and ub to PrevLB and PrevUB
392 
393     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
394     // In this case we use EUB
395 
396     // initialize omp.iv
397 
398     // check exit condition
399 
400     // check that PrevLB and PrevUB are passed to the 'for'
401 
402     // check stride 1 for 'for' in 'distribute parallel for simd'
403 
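    // Rough sketch of the two-level scheduling described above (not checked by
    // FileCheck; schedule-kind names are the usual libomp ones, 92 = distribute
    // static, 34 = static, matching the constants in the CHECK lines below):
    //
    //   // 'distribute' part, one static chunk per team:
    //   __kmpc_for_static_init_4(&loc, tid, /*kmp_distribute_static=*/92, &is_last,
    //                            &comb_lb, &comb_ub, &stride, 1, 1);
    //   comb_ub = min(comb_ub, EUB);
    //   __kmpc_fork_call(..., comb_lb, comb_ub, n, a, b, c);  // PrevLB/PrevUB for the 'for'
    //
    //   // 'parallel for simd' part, inside the forked outlined function:
    //   __kmpc_for_static_init_4(&loc, tid, /*kmp_sch_static=*/34, &is_last,
    //                            &lb /*PrevLB*/, &ub /*PrevUB, clamped to EUB*/,
    //                            &stride, 1, 1);
    //   for (iv = lb; iv <= ub; ++iv) body(iv);
    //   __kmpc_for_static_fini(&loc, tid);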
404   }
405 
406   // dist_schedule: static no chunk
407   #pragma omp target
408   #pragma omp teams
409 
410   #pragma omp distribute parallel for simd dist_schedule(static)
411   for (int i = 0; i < n; ++i) {
412     a[i] = b[i] + c[i];
413 
414 
415     // check EUB for distribute
416 
417     // initialize omp.iv
418 
419     // check exit condition
420 
421     // check that PrevLB and PrevUB are passed to the 'for'
422     // check that distlb and distub are properly passed to fork_call
423 
424     // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
425 
426 
427     // implementation of 'parallel for'
428 
429 
430     // initialize lb and ub to PrevLB and PrevUB
431 
432     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
433     // In this case we use EUB
434 
435     // initialize omp.iv
436 
437     // check exit condition
438 
439     // check that PrevLB and PrevUB are passed to the 'for'
440 
441     // check stride 1 for 'for' in 'distribute parallel for simd'
442 
443   }
444 
445   // dist_schedule: static chunk
446   #pragma omp target
447   #pragma omp teams
448 
449   #pragma omp distribute parallel for simd dist_schedule(static, ch)
450   for (int i = 0; i < n; ++i) {
451     a[i] = b[i] + c[i];
452 
453     // unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
454 
455     // check EUB for distribute
456 
457     // initialize omp.iv
458 
459     // check exit condition
460 
461     // check that PrevLB and PrevUB are passed to the 'for'
462     // check that distlb and distub are properly passed to fork_call
463 
464     // check DistInc
465 
466     // Update UB
467 
468     // Store LB in IV
469 
470 
471     // loop exit
472 
473     // skip implementation of 'parallel for': it uses the default schedule, which is tested above
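    // Rough sketch of the chunked 'distribute' described above (not checked by
    // FileCheck; assumes the libomp distribute-static-chunked schedule kind):
    //
    //   __kmpc_for_static_init_4(&loc, tid, /*distribute static chunked*/ 91, &is_last,
    //                            &comb_lb, &comb_ub, &stride, 1, ch);
    //   comb_ub = min(comb_ub, EUB);
    //   iv = comb_lb;
    //   while (iv <= EUB) {                                // outer 'distribute' loop
    //     __kmpc_fork_call(..., comb_lb, comb_ub, ...);    // one chunk per iteration
    //     comb_lb += stride; comb_ub += stride;            // DistInc
    //     comb_ub = min(comb_ub, EUB);                     // Update UB
    //     iv = comb_lb;                                    // Store LB in IV
    //   }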
474   }
475 
476   // schedule: static no chunk
477   #pragma omp target
478   #pragma omp teams
479 
480   #pragma omp distribute parallel for simd schedule(static)
481   for (int i = 0; i < n; ++i) {
482     a[i] = b[i] + c[i];
483 
484     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
485 
486     // 'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default)
487 
488 
489     // initialize lb and ub to PrevLB and PrevUB
490 
491     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
492     // In this case we use EUB
493 
494     // initialize omp.iv
495 
496     // check exit condition
497 
498     // check that PrevLB and PrevUB are passed to the 'for'
499 
500     // check stride 1 for 'for' in 'distribute parallel for simd'
501 
502   }
503 
504   // schedule: static chunk
505   #pragma omp target
506   #pragma omp teams
507 
508   #pragma omp distribute parallel for simd schedule(static, ch)
509   for (int i = 0; i < n; ++i) {
510     a[i] = b[i] + c[i];
511     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
512 
513     // 'parallel for' implementation using outer and inner loops and PrevEUB
514 
515     // initialize lb and ub to PrevLB and PrevUB
516 
517     // check PrevEUB (using PrevUB instead of NumIt as upper bound)
518 
519     // initialize omp.iv (IV = LB)
520 
521     // outer loop: while (IV < UB) {
522 
523 
524 
525     // skip body branch
526 
527     // IV = IV + 1 and inner loop latch
528 
529     // check NextLB and NextUB
530 
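    // Rough sketch of the chunked 'for' described above (not checked by FileCheck;
    // assumes the libomp static-chunked schedule kind):
    //
    //   __kmpc_for_static_init_4(&loc, tid, /*static chunked*/ 33, &is_last,
    //                            &lb /*PrevLB*/, &ub, &stride, 1, ch);
    //   iv = lb;
    //   while (true) {                        // outer dispatch loop over this thread's chunks
    //     ub = min(ub, PrevUB);               // PrevEUB: clamp to PrevUB, not to NumIterations
    //     if (iv > ub) break;
    //     for (; iv <= ub; ++iv) body(iv);    // inner loop over one chunk
    //     lb += stride; ub += stride;         // NextLB / NextUB
    //     iv = lb;
    //   }
    //   __kmpc_for_static_fini(&loc, tid);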
531 
532   }
533 
534   // schedule: dynamic no chunk
535   #pragma omp target
536   #pragma omp teams
537 
538   #pragma omp distribute parallel for simd schedule(dynamic)
539   for (int i = 0; i < n; ++i) {
540     a[i] = b[i] + c[i];
541     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
542 
543     // 'parallel for' implementation using outer and inner loops and PrevEUB
544 
545     // initialize lb and ub to PrevLB and PrevUB
546 
547 
548     // initialize omp.iv (IV = LB)
549 
550 
551     // skip body branch
552 
553     // IV = IV + 1 and inner loop latch
554 
555     // check NextLB and NextUB
556 
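    // Rough sketch of the dynamic 'for' described above (not checked by FileCheck;
    // assumes the libomp dispatch entry points):
    //
    //   __kmpc_dispatch_init_4(&loc, tid, /*dynamic chunked*/ 35,
    //                          /*lb=*/PrevLB, /*ub=*/PrevUB, /*st=*/1, /*chunk=*/1);
    //   while (__kmpc_dispatch_next_4(&loc, tid, &is_last, &lb, &ub, &stride)) {
    //     for (iv = lb; iv <= ub; ++iv) body(iv);
    //   }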
557 
558   }
559 
560   // schedule: dynamic chunk
561   #pragma omp target
562   #pragma omp teams
563 
564   #pragma omp distribute parallel for simd schedule(dynamic, ch)
565   for (int i = 0; i < n; ++i) {
566     a[i] = b[i] + c[i];
567     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
568 
569     // 'parallel for' implementation using outer and inner loops and PrevEUB
570 
571     // initialize lb and ub to PrevLB and PrevUB
572 
573 
574     // initialize omp.iv (IV = LB)
575 
576 
577     // skip body branch
578 
579     // IV = IV + 1 and inner loop latch
580 
581     // check NextLB and NextUB
582 
583 
584   }
585 
586   return tmain<int>();
587 #endif
588 }
589 
590 // check code
591 
592 
593 
594 
595 
596 
597 
598 
599 
600 
601 
602 // check EUB for distribute
603 
604 // initialize omp.iv
605 
606 // check exit condition
607 
608 // check that PrevLB and PrevUB are passed to the 'for'
609 // check that distlb and distub are properly passed to fork_call
610 
611 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
612 
613 
614 // implementation of 'parallel for'
615 
616 
617 // initialize lb and ub to PrevLB and PrevUB
618 
619 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
620 // In this case we use EUB
621 
622 // initialize omp.iv
623 
624 // check exit condition
625 
626 // check that PrevLB and PrevUB are passed to the 'for'
627 
628 // check stride 1 for 'for' in 'distribute parallel for simd'
629 
630 
631 
632 
633 
634 // check EUB for distribute
635 
636 // initialize omp.iv
637 
638 // check exit condition
639 
640 // check that PrevLB and PrevUB are passed to the 'for'
641 // check that distlb and distub are properly passed to fork_call
642 
643 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
644 
645 
646 // implementation of 'parallel for'
647 
648 
649 // initialize lb and ub to PrevLB and PrevUB
650 
651 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
652 // In this case we use EUB
653 
654 // initialize omp.iv
655 
656 // check exit condition
657 
658 // check that PrevLB and PrevUB are passed to the 'for'
659 
660 // check stride 1 for 'for' in 'distribute parallel for simd'
661 
662 
663 
664 
665 // unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
666 
667 // check EUB for distribute
668 
669 // initialize omp.iv
670 
671 // check exit condition
672 
673 // check that PrevLB and PrevUB are passed to the 'for'
674 // check that distlb and distub are properly passed to fork_call
675 
676 // check DistInc
677 
678 // Update UB
679 
680 // Store LB in IV
681 
682 
683 // loop exit
684 
685 // skip implementation of 'parallel for': it uses the default schedule, which is tested above
686 
687 
688 
689 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
690 
691 // 'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default)
692 
693 
694 // initialize lb and ub to PrevLB and PrevUB
695 
696 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
697 // In this case we use EUB
698 
699 // initialize omp.iv
700 
701 // check exit condition
702 
703 // check that PrevLB and PrevUB are passed to the 'for'
704 
705 // check stride 1 for 'for' in 'distribute parallel for simd'
706 
707 
708 
709 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
710 
711 // 'parallel for' implementation using outer and inner loops and PrevEUB
712 
713 // initialize lb and ub to PrevLB and PrevUB
714 
715 // check PrevEUB (using PrevUB instead of NumIt as upper bound)
716 
717 // initialize omp.iv (IV = LB)
718 
719 // outer loop: while (IV < UB) {
720 
721 
722 
723 // skip body branch
724 
725 // IV = IV + 1 and inner loop latch
726 
727 // check NextLB and NextUB
728 
729 
730 
731 
732 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
733 
734 // 'parallel for' implementation using outer and inner loops and PrevEUB
735 
736 // initialize lb and ub to PrevLB and PrevUB
737 
738 
739 // initialize omp.iv (IV = LB)
740 
741 
742 // skip body branch
743 
744 // IV = IV + 1 and inner loop latch
745 
746 // check NextLB and NextUB
747 
748 
749 
750 
751 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
752 
753 // 'parallel for' implementation using outer and inner loops and PrevEUB
754 
755 // initialize lb and ub to PrevLB and PrevUB
756 
757 
758 // initialize omp.iv (IV = LB)
759 
760 
761 // skip body branch
762 
763 // IV = IV + 1 and inner loop latch
764 
765 // check NextLB and NextUB
766 
767 
768 
769 #endif
770 // CHECK1-LABEL: define {{[^@]+}}@main
771 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
772 // CHECK1-NEXT:  entry:
773 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
774 // CHECK1-NEXT:    [[A:%.*]] = alloca double*, align 8
775 // CHECK1-NEXT:    [[B:%.*]] = alloca double*, align 8
776 // CHECK1-NEXT:    [[C:%.*]] = alloca double*, align 8
777 // CHECK1-NEXT:    [[N:%.*]] = alloca i32, align 4
778 // CHECK1-NEXT:    [[CH:%.*]] = alloca i32, align 4
779 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
780 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
781 // CHECK1-NEXT:    store i32 10000, i32* [[N]], align 4
782 // CHECK1-NEXT:    store i32 100, i32* [[CH]], align 4
783 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
784 // CHECK1-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
785 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
786 // CHECK1-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
787 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
788 // CHECK1-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
789 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
790 // CHECK1-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
791 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
792 // CHECK1-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
793 // CHECK1-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
794 // CHECK1-NEXT:    ret i32 0
795 //
796 //
797 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
798 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
799 // CHECK1-NEXT:  entry:
800 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
801 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
802 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
803 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
804 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
805 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
806 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
807 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
808 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
809 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
810 // CHECK1-NEXT:    ret void
811 //
812 //
813 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
814 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
815 // CHECK1-NEXT:  entry:
816 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
817 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
818 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
819 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
820 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
821 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
822 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
823 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
824 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
825 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
826 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
827 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
828 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
829 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
830 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
831 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
832 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
833 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
834 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
835 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
836 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
837 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
838 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
839 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
840 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
841 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
842 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
843 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
844 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
845 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
846 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
847 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
848 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
849 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
850 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
851 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
852 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
853 // CHECK1:       omp.precond.then:
854 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
855 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
856 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
857 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
858 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
859 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
860 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
861 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
862 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
863 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
864 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
865 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
866 // CHECK1:       cond.true:
867 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
868 // CHECK1-NEXT:    br label [[COND_END:%.*]]
869 // CHECK1:       cond.false:
870 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
871 // CHECK1-NEXT:    br label [[COND_END]]
872 // CHECK1:       cond.end:
873 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
874 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
875 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
876 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
877 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
878 // CHECK1:       omp.inner.for.cond:
879 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
880 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
881 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
882 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
883 // CHECK1:       omp.inner.for.body:
884 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !10
885 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
886 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
887 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
888 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !10
889 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
890 // CHECK1:       omp.inner.for.inc:
891 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
892 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !10
893 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
894 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
895 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
896 // CHECK1:       omp.inner.for.end:
897 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
898 // CHECK1:       omp.loop.exit:
899 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
900 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
901 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
902 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
903 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
904 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
905 // CHECK1:       .omp.final.then:
906 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
907 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
908 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
909 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
910 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
911 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
912 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
913 // CHECK1:       .omp.final.done:
914 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
915 // CHECK1:       omp.precond.end:
916 // CHECK1-NEXT:    ret void
917 //
918 //
919 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
920 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
921 // CHECK1-NEXT:  entry:
922 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
923 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
924 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
925 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
926 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
927 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
928 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
929 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
930 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
931 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
932 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
933 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
934 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
935 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
936 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
937 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
938 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
939 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
940 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
941 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
942 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
943 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
944 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
945 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
946 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
947 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
948 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
949 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
950 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
951 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
952 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
953 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
954 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
955 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
956 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
957 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
958 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
959 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
960 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
961 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
962 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
963 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
964 // CHECK1:       omp.precond.then:
965 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
966 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
967 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
968 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
969 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
970 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
971 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
972 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
973 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
974 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
975 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
976 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
977 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
978 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
979 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
980 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
981 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
982 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
983 // CHECK1:       cond.true:
984 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
985 // CHECK1-NEXT:    br label [[COND_END:%.*]]
986 // CHECK1:       cond.false:
987 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
988 // CHECK1-NEXT:    br label [[COND_END]]
989 // CHECK1:       cond.end:
990 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
991 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
992 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
993 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
994 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
995 // CHECK1:       omp.inner.for.cond:
996 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
997 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
998 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
999 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1000 // CHECK1:       omp.inner.for.body:
1001 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1002 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1003 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1004 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !14
1005 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !14
1006 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1007 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1008 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1009 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !14
1010 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !14
1011 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1012 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1013 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1014 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !14
1015 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1016 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !14
1017 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1018 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1019 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1020 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !14
1021 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1022 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !14
1023 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
1024 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !14
1025 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
1026 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !14
1027 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
1028 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !14
1029 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !14
1030 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1031 // CHECK1:       omp.body.continue:
1032 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1033 // CHECK1:       omp.inner.for.inc:
1034 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1035 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1036 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1037 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
1038 // CHECK1:       omp.inner.for.end:
1039 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1040 // CHECK1:       omp.loop.exit:
1041 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1042 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1043 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1044 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1045 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1046 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1047 // CHECK1:       .omp.final.then:
1048 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1049 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1050 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1051 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1052 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1053 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1054 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1055 // CHECK1:       .omp.final.done:
1056 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1057 // CHECK1:       omp.precond.end:
1058 // CHECK1-NEXT:    ret void
1059 //
1060 //
1061 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
1062 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1063 // CHECK1-NEXT:  entry:
1064 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1065 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1066 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1067 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1068 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1069 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1070 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1071 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1072 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1073 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1074 // CHECK1-NEXT:    ret void
1075 //
1076 //
1077 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
1078 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1079 // CHECK1-NEXT:  entry:
1080 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1081 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1082 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1083 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1084 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1085 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1086 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1087 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1088 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1089 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1090 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1091 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1092 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1093 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1094 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1095 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1096 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1097 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1098 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1099 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1100 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1101 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1102 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1103 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1104 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1105 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1106 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1107 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1108 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1109 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1110 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1111 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1112 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1113 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1114 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1115 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1116 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1117 // CHECK1:       omp.precond.then:
1118 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1119 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1120 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1121 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1122 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1123 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1124 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1125 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1126 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1127 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1128 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1129 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1130 // CHECK1:       cond.true:
1131 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1132 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1133 // CHECK1:       cond.false:
1134 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1135 // CHECK1-NEXT:    br label [[COND_END]]
1136 // CHECK1:       cond.end:
1137 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1138 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1139 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1140 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1141 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1142 // CHECK1:       omp.inner.for.cond:
1143 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1144 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1145 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1146 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1147 // CHECK1:       omp.inner.for.body:
1148 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !19
1149 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1150 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1151 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1152 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !19
1153 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1154 // CHECK1:       omp.inner.for.inc:
1155 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1156 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !19
1157 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1158 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1159 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
1160 // CHECK1:       omp.inner.for.end:
1161 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1162 // CHECK1:       omp.loop.exit:
1163 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1164 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1165 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1166 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1167 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1168 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1169 // CHECK1:       .omp.final.then:
1170 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1171 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1172 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1173 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1174 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1175 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
1176 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1177 // CHECK1:       .omp.final.done:
1178 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1179 // CHECK1:       omp.precond.end:
1180 // CHECK1-NEXT:    ret void
1181 //
1182 //
1183 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
1184 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1185 // CHECK1-NEXT:  entry:
1186 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1187 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1188 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1189 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1190 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1191 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1192 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1193 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1194 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1195 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1196 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1197 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1198 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1199 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1200 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1201 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1202 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1203 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1204 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
1205 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1206 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1207 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1208 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1209 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1210 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1211 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1212 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1213 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1214 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1215 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1216 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1217 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1218 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1219 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1220 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1221 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1222 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1223 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1224 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1225 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1226 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1227 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1228 // CHECK1:       omp.precond.then:
1229 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1230 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1231 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1232 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1233 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1234 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1235 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1236 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1237 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1238 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1239 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1240 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1241 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1242 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1243 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1244 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1245 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1246 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1247 // CHECK1:       cond.true:
1248 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1249 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1250 // CHECK1:       cond.false:
1251 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1252 // CHECK1-NEXT:    br label [[COND_END]]
1253 // CHECK1:       cond.end:
1254 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1255 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1256 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1257 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1258 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1259 // CHECK1:       omp.inner.for.cond:
1260 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1261 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
1262 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1263 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1264 // CHECK1:       omp.inner.for.body:
1265 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1266 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1267 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1268 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !22
1269 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !22
1270 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1271 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1272 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1273 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !22
1274 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !22
1275 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1276 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1277 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1278 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !22
1279 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1280 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !22
1281 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1282 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1283 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1284 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !22
1285 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
1286 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !22
1287 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
1288 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !22
1289 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
1290 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !22
1291 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
1292 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !22
1293 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !22
1294 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1295 // CHECK1:       omp.body.continue:
1296 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1297 // CHECK1:       omp.inner.for.inc:
1298 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1299 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1300 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1301 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
1302 // CHECK1:       omp.inner.for.end:
1303 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1304 // CHECK1:       omp.loop.exit:
1305 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1306 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1307 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1308 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1309 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1310 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1311 // CHECK1:       .omp.final.then:
1312 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1313 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1314 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1315 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1316 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1317 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1318 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1319 // CHECK1:       .omp.final.done:
1320 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1321 // CHECK1:       omp.precond.end:
1322 // CHECK1-NEXT:    ret void
1323 //
1324 //
1325 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
1326 // CHECK1-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1327 // CHECK1-NEXT:  entry:
1328 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
1329 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1330 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1331 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1332 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1333 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
1334 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1335 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1336 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1337 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1338 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1339 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1340 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1341 // CHECK1-NEXT:    ret void
1342 //
1343 //
1344 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
1345 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1346 // CHECK1-NEXT:  entry:
1347 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1348 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1349 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
1350 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1351 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1352 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1353 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1354 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1355 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1356 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1357 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1358 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1359 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1360 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1361 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1362 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1363 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1364 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1365 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1366 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
1367 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1368 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1369 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1370 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1371 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1372 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1373 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1374 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1375 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1376 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
1377 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1378 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1379 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
1380 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1381 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1382 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1383 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1384 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1385 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
1386 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1387 // CHECK1:       omp.precond.then:
1388 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1389 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1390 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
1391 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1392 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1393 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
1394 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1395 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1396 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
1397 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1398 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1399 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1400 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1401 // CHECK1:       cond.true:
1402 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1403 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1404 // CHECK1:       cond.false:
1405 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1406 // CHECK1-NEXT:    br label [[COND_END]]
1407 // CHECK1:       cond.end:
1408 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1409 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1410 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1411 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1412 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1413 // CHECK1:       omp.inner.for.cond:
1414 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1415 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1416 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
1417 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
1418 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1419 // CHECK1:       omp.inner.for.body:
1420 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1421 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1422 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1423 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1424 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !25
1425 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1426 // CHECK1:       omp.inner.for.inc:
1427 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1428 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1429 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
1430 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1431 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1432 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1433 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1434 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1435 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1436 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1437 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
1438 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1439 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1440 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1441 // CHECK1-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
1442 // CHECK1-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
1443 // CHECK1:       cond.true10:
1444 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1445 // CHECK1-NEXT:    br label [[COND_END12:%.*]]
1446 // CHECK1:       cond.false11:
1447 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1448 // CHECK1-NEXT:    br label [[COND_END12]]
1449 // CHECK1:       cond.end12:
1450 // CHECK1-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
1451 // CHECK1-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1452 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1453 // CHECK1-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1454 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
1455 // CHECK1:       omp.inner.for.end:
1456 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1457 // CHECK1:       omp.loop.exit:
1458 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1459 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
1460 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
1461 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1462 // CHECK1-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
1463 // CHECK1-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1464 // CHECK1:       .omp.final.then:
1465 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1466 // CHECK1-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
1467 // CHECK1-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
1468 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
1469 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
1470 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
1471 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1472 // CHECK1:       .omp.final.done:
1473 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1474 // CHECK1:       omp.precond.end:
1475 // CHECK1-NEXT:    ret void
1476 //
1477 //
1478 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
1479 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1480 // CHECK1-NEXT:  entry:
1481 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1482 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1483 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1484 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1485 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1486 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1487 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1488 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1489 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1490 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1491 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1492 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1493 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1494 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1495 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1496 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1497 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1498 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1499 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 8
1500 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1501 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1502 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1503 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1504 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1505 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1506 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1507 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1508 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1509 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1510 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1511 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1512 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1513 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1514 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1515 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1516 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1517 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1518 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1519 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1520 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1521 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1522 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1523 // CHECK1:       omp.precond.then:
1524 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1525 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1526 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1527 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1528 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1529 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1530 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1531 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1532 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1533 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1534 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1535 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1536 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1537 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1538 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1539 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1540 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1541 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1542 // CHECK1:       cond.true:
1543 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1544 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1545 // CHECK1:       cond.false:
1546 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1547 // CHECK1-NEXT:    br label [[COND_END]]
1548 // CHECK1:       cond.end:
1549 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1550 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1551 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1552 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1553 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1554 // CHECK1:       omp.inner.for.cond:
1555 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1556 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
1557 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1558 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1559 // CHECK1:       omp.inner.for.body:
1560 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1561 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1562 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1563 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !28
1564 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !28
1565 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1566 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1567 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1568 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !28
1569 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !28
1570 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1571 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1572 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1573 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !28
1574 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1575 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !28
1576 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1577 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1578 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1579 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !28
1580 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
1581 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !28
1582 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
1583 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !28
1584 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
1585 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !28
1586 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
1587 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !28
1588 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !28
1589 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1590 // CHECK1:       omp.body.continue:
1591 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1592 // CHECK1:       omp.inner.for.inc:
1593 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1594 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1595 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1596 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
1597 // CHECK1:       omp.inner.for.end:
1598 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1599 // CHECK1:       omp.loop.exit:
1600 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1601 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1602 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1603 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1604 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1605 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1606 // CHECK1:       .omp.final.then:
1607 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1608 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1609 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1610 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1611 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1612 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1613 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1614 // CHECK1:       .omp.final.done:
1615 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1616 // CHECK1:       omp.precond.end:
1617 // CHECK1-NEXT:    ret void
1618 //
1619 //
1620 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
1621 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1622 // CHECK1-NEXT:  entry:
1623 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1624 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1625 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1626 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1627 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1628 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1629 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1630 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1631 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1632 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1633 // CHECK1-NEXT:    ret void
1634 //
1635 //
1636 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
1637 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1638 // CHECK1-NEXT:  entry:
1639 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1640 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1641 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1642 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1643 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1644 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1645 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1646 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1647 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1648 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1649 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1650 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1651 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1652 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1653 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1654 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1655 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1656 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1657 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1658 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1659 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1660 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1661 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1662 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1663 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1664 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1665 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1666 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1667 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1668 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1669 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1670 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1671 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1672 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1673 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1674 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1675 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1676 // CHECK1:       omp.precond.then:
1677 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1678 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1679 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1680 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1681 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1682 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1683 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1684 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1685 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1686 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1687 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1688 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1689 // CHECK1:       cond.true:
1690 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1691 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1692 // CHECK1:       cond.false:
1693 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1694 // CHECK1-NEXT:    br label [[COND_END]]
1695 // CHECK1:       cond.end:
1696 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1697 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1698 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1699 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1700 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1701 // CHECK1:       omp.inner.for.cond:
1702 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1703 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1704 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1705 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1706 // CHECK1:       omp.inner.for.body:
1707 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31
1708 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1709 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1710 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1711 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !31
1712 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1713 // CHECK1:       omp.inner.for.inc:
1714 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1715 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !31
1716 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1717 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1718 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
1719 // CHECK1:       omp.inner.for.end:
1720 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1721 // CHECK1:       omp.loop.exit:
1722 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1723 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1724 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1725 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1726 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1727 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1728 // CHECK1:       .omp.final.then:
1729 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1730 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1731 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1732 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1733 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1734 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
1735 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1736 // CHECK1:       .omp.final.done:
1737 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1738 // CHECK1:       omp.precond.end:
1739 // CHECK1-NEXT:    ret void
1740 //
1741 //
1742 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
1743 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1744 // CHECK1-NEXT:  entry:
1745 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1746 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1747 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1748 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1749 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1750 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1751 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1752 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1753 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1754 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1755 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1756 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1757 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1758 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1759 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1760 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1761 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1762 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1763 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 8
1764 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1765 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1766 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1767 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1768 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1769 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1770 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1771 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1772 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1773 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1774 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1775 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1776 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1777 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1778 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1779 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1780 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1781 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1782 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1783 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1784 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1785 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1786 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1787 // CHECK1:       omp.precond.then:
1788 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1789 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1790 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1791 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1792 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1793 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1794 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1795 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1796 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1797 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1798 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1799 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1800 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1801 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1802 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1803 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1804 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1805 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1806 // CHECK1:       cond.true:
1807 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1808 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1809 // CHECK1:       cond.false:
1810 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1811 // CHECK1-NEXT:    br label [[COND_END]]
1812 // CHECK1:       cond.end:
1813 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1814 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1815 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1816 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1817 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1818 // CHECK1:       omp.inner.for.cond:
1819 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1820 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
1821 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1822 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1823 // CHECK1:       omp.inner.for.body:
1824 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1825 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1826 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1827 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !34
1828 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !34
1829 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1830 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1831 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1832 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !34
1833 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !34
1834 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1835 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1836 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1837 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !34
1838 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1839 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !34
1840 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1841 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1842 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1843 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !34
1844 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
1845 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !34
1846 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
1847 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !34
1848 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
1849 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !34
1850 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
1851 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !34
1852 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !34
1853 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1854 // CHECK1:       omp.body.continue:
1855 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1856 // CHECK1:       omp.inner.for.inc:
1857 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1858 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1859 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1860 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
1861 // CHECK1:       omp.inner.for.end:
1862 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1863 // CHECK1:       omp.loop.exit:
1864 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1865 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1866 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1867 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1868 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1869 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1870 // CHECK1:       .omp.final.then:
1871 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1872 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1873 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1874 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1875 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1876 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1877 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1878 // CHECK1:       .omp.final.done:
1879 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1880 // CHECK1:       omp.precond.end:
1881 // CHECK1-NEXT:    ret void
1882 //
1883 //
1884 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
1885 // CHECK1-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1886 // CHECK1-NEXT:  entry:
1887 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
1888 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1889 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1890 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1891 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1892 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
1893 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1894 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1895 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1896 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1897 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1898 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1899 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1900 // CHECK1-NEXT:    ret void
1901 //
1902 //
1903 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
1904 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1905 // CHECK1-NEXT:  entry:
1906 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1907 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1908 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
1909 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1910 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1911 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1912 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1913 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1914 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1915 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1916 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1917 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1918 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1919 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1920 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1921 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1922 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1923 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1924 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
1925 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1926 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1927 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
1928 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1929 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1930 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1931 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1932 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1933 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1934 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1935 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1936 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1937 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
1938 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1939 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
1940 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1941 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1942 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
1943 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1944 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
1945 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
1946 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1947 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1948 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
1949 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1950 // CHECK1:       omp.precond.then:
1951 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1952 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1953 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
1954 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1955 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1956 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1957 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1958 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1959 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1960 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1961 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1962 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1963 // CHECK1:       cond.true:
1964 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1965 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1966 // CHECK1:       cond.false:
1967 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1968 // CHECK1-NEXT:    br label [[COND_END]]
1969 // CHECK1:       cond.end:
1970 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1971 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1972 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1973 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1974 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1975 // CHECK1:       omp.inner.for.cond:
1976 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1977 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1978 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1979 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1980 // CHECK1:       omp.inner.for.body:
1981 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37
1982 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1983 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1984 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1985 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !37
1986 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
1987 // CHECK1-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !37
1988 // CHECK1-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !37
1989 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !37
1990 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1991 // CHECK1:       omp.inner.for.inc:
1992 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1993 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !37
1994 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1995 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1996 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
1997 // CHECK1:       omp.inner.for.end:
1998 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1999 // CHECK1:       omp.loop.exit:
2000 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2001 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2002 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2003 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2004 // CHECK1-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2005 // CHECK1-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2006 // CHECK1:       .omp.final.then:
2007 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2008 // CHECK1-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2009 // CHECK1-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2010 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2011 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2012 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
2013 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2014 // CHECK1:       .omp.final.done:
2015 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2016 // CHECK1:       omp.precond.end:
2017 // CHECK1-NEXT:    ret void
2018 //
2019 //
2020 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15
2021 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2022 // CHECK1-NEXT:  entry:
2023 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2024 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2025 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2026 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2027 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2028 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2029 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2030 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2031 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2032 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2033 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2034 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2035 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2036 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2037 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2038 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2039 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2040 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2041 // CHECK1-NEXT:    [[I6:%.*]] = alloca i32, align 4
2042 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 8
2043 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2044 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2045 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2046 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2047 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2048 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2049 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2050 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2051 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2052 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2053 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2054 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2055 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2056 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2057 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2058 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2059 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2060 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2061 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2062 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2063 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2064 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2065 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2066 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2067 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2068 // CHECK1:       omp.precond.then:
2069 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2070 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2071 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2072 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2073 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2074 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2075 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2076 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2077 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2078 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2079 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2080 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
2081 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2082 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2083 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
2084 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2085 // CHECK1:       omp.dispatch.cond:
2086 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2087 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2088 // CHECK1-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
2089 // CHECK1-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
2090 // CHECK1-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2091 // CHECK1:       cond.true:
2092 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2093 // CHECK1-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
2094 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2095 // CHECK1:       cond.false:
2096 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2097 // CHECK1-NEXT:    br label [[COND_END]]
2098 // CHECK1:       cond.end:
2099 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
2100 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2101 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2102 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2103 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2104 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2105 // CHECK1-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2106 // CHECK1-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2107 // CHECK1:       omp.dispatch.body:
2108 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2109 // CHECK1:       omp.inner.for.cond:
2110 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2111 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
2112 // CHECK1-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
2113 // CHECK1-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2114 // CHECK1:       omp.inner.for.body:
2115 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2116 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
2117 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2118 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !40
2119 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !40
2120 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2121 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
2122 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
2123 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !40
2124 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !40
2125 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2126 // CHECK1-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
2127 // CHECK1-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
2128 // CHECK1-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !40
2129 // CHECK1-NEXT:    [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
2130 // CHECK1-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !40
2131 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2132 // CHECK1-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
2133 // CHECK1-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
2134 // CHECK1-NEXT:    store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !40
2135 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
2136 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 8, !llvm.access.group !40
2137 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
2138 // CHECK1-NEXT:    store i32* [[I6]], i32** [[TMP32]], align 8, !llvm.access.group !40
2139 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
2140 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 8, !llvm.access.group !40
2141 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
2142 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 8, !llvm.access.group !40
2143 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !40
2144 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2145 // CHECK1:       omp.body.continue:
2146 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2147 // CHECK1:       omp.inner.for.inc:
2148 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2149 // CHECK1-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP35]], 1
2150 // CHECK1-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2151 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
2152 // CHECK1:       omp.inner.for.end:
2153 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2154 // CHECK1:       omp.dispatch.inc:
2155 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2156 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2157 // CHECK1-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
2158 // CHECK1-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
2159 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2160 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2161 // CHECK1-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
2162 // CHECK1-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
2163 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2164 // CHECK1:       omp.dispatch.end:
2165 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2166 // CHECK1-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
2167 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
2168 // CHECK1-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2169 // CHECK1-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
2170 // CHECK1-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2171 // CHECK1:       .omp.final.then:
2172 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2173 // CHECK1-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP44]], 0
2174 // CHECK1-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
2175 // CHECK1-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
2176 // CHECK1-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
2177 // CHECK1-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
2178 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2179 // CHECK1:       .omp.final.done:
2180 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2181 // CHECK1:       omp.precond.end:
2182 // CHECK1-NEXT:    ret void
2183 //
2184 //
2185 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
2186 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
2187 // CHECK1-NEXT:  entry:
2188 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2189 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2190 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2191 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2192 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2193 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2194 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2195 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2196 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2197 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2198 // CHECK1-NEXT:    ret void
2199 //
2200 //
2201 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..18
2202 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2203 // CHECK1-NEXT:  entry:
2204 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2205 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2206 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2207 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2208 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2209 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2210 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2211 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2212 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2213 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2214 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2215 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2216 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2217 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2218 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2219 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
2220 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2221 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2222 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2223 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2224 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2225 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2226 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2227 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2228 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2229 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2230 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2231 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2232 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2233 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2234 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2235 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2236 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2237 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2238 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2239 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2240 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2241 // CHECK1:       omp.precond.then:
2242 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2243 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2244 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2245 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2246 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2247 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2248 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2249 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2250 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2251 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2252 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2253 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2254 // CHECK1:       cond.true:
2255 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2256 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2257 // CHECK1:       cond.false:
2258 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2259 // CHECK1-NEXT:    br label [[COND_END]]
2260 // CHECK1:       cond.end:
2261 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2262 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2263 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2264 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2265 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2266 // CHECK1:       omp.inner.for.cond:
2267 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2268 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2269 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2270 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2271 // CHECK1:       omp.inner.for.body:
2272 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !43
2273 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
2274 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2275 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2276 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !43
2277 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2278 // CHECK1:       omp.inner.for.inc:
2279 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2280 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !43
2281 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2282 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2283 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
2284 // CHECK1:       omp.inner.for.end:
2285 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2286 // CHECK1:       omp.loop.exit:
2287 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2288 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
2289 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
2290 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2291 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2292 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2293 // CHECK1:       .omp.final.then:
2294 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2295 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
2296 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2297 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2298 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2299 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
2300 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2301 // CHECK1:       .omp.final.done:
2302 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2303 // CHECK1:       omp.precond.end:
2304 // CHECK1-NEXT:    ret void
2305 //
2306 //
2307 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..19
2308 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2309 // CHECK1-NEXT:  entry:
2310 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2311 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2312 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2313 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2314 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2315 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2316 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2317 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2318 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2319 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2320 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2321 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2322 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2323 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2324 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2325 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2326 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2327 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
2328 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 8
2329 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2330 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2331 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2332 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2333 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2334 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2335 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2336 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2337 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2338 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2339 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2340 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2341 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2342 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2343 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2344 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2345 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2346 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2347 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2348 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2349 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2350 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2351 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2352 // CHECK1:       omp.precond.then:
2353 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2354 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2355 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2356 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2357 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
2358 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2359 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
2360 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2361 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
2362 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2363 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2364 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2365 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2366 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2367 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
2368 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
2369 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2370 // CHECK1:       omp.dispatch.cond:
2371 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2372 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
2373 // CHECK1-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2374 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
2375 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2376 // CHECK1:       omp.dispatch.body:
2377 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2378 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2379 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2380 // CHECK1:       omp.inner.for.cond:
2381 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2382 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !46
2383 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2384 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2385 // CHECK1:       omp.inner.for.body:
2386 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2387 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
2388 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2389 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !46
2390 // CHECK1-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !46
2391 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2392 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
2393 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
2394 // CHECK1-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !46
2395 // CHECK1-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !46
2396 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2397 // CHECK1-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
2398 // CHECK1-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
2399 // CHECK1-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !46
2400 // CHECK1-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
2401 // CHECK1-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !46
2402 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2403 // CHECK1-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
2404 // CHECK1-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
2405 // CHECK1-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !46
2406 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
2407 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 8, !llvm.access.group !46
2408 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
2409 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP30]], align 8, !llvm.access.group !46
2410 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
2411 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 8, !llvm.access.group !46
2412 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
2413 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 8, !llvm.access.group !46
2414 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !46
2415 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2416 // CHECK1:       omp.body.continue:
2417 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2418 // CHECK1:       omp.inner.for.inc:
2419 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2420 // CHECK1-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP33]], 1
2421 // CHECK1-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2422 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
2423 // CHECK1:       omp.inner.for.end:
2424 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2425 // CHECK1:       omp.dispatch.inc:
2426 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2427 // CHECK1:       omp.dispatch.end:
2428 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2429 // CHECK1-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
2430 // CHECK1-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2431 // CHECK1:       .omp.final.then:
2432 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2433 // CHECK1-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP36]], 0
2434 // CHECK1-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
2435 // CHECK1-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
2436 // CHECK1-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
2437 // CHECK1-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
2438 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2439 // CHECK1:       .omp.final.done:
2440 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2441 // CHECK1:       omp.precond.end:
2442 // CHECK1-NEXT:    ret void
2443 //
2444 //
2445 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
2446 // CHECK1-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
2447 // CHECK1-NEXT:  entry:
2448 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
2449 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2450 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2451 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2452 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2453 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
2454 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2455 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2456 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2457 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2458 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
2459 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2460 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2461 // CHECK1-NEXT:    ret void
2462 //
2463 //
2464 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..22
2465 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2466 // CHECK1-NEXT:  entry:
2467 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2468 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2469 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
2470 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2471 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2472 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2473 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2474 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2475 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2476 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2477 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2478 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2479 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2480 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2481 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2482 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2483 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2484 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
2485 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
2486 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2487 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2488 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
2489 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2490 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2491 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2492 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2493 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
2494 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2495 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
2496 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
2497 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
2498 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
2499 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
2500 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
2501 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2502 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2503 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
2504 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2505 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2506 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2507 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2508 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2509 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
2510 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2511 // CHECK1:       omp.precond.then:
2512 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2513 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2514 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
2515 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2516 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2517 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2518 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2519 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2520 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2521 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2522 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2523 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2524 // CHECK1:       cond.true:
2525 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2526 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2527 // CHECK1:       cond.false:
2528 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2529 // CHECK1-NEXT:    br label [[COND_END]]
2530 // CHECK1:       cond.end:
2531 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2532 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2533 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2534 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2535 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2536 // CHECK1:       omp.inner.for.cond:
2537 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2538 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2539 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2540 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2541 // CHECK1:       omp.inner.for.body:
2542 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !49
2543 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2544 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2545 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
2546 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !49
2547 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
2548 // CHECK1-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !49
2549 // CHECK1-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !49
2550 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !49
2551 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2552 // CHECK1:       omp.inner.for.inc:
2553 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2554 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !49
2555 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
2556 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2557 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
2558 // CHECK1:       omp.inner.for.end:
2559 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2560 // CHECK1:       omp.loop.exit:
2561 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2562 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2563 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2564 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2565 // CHECK1-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2566 // CHECK1-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2567 // CHECK1:       .omp.final.then:
2568 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2569 // CHECK1-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2570 // CHECK1-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2571 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2572 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2573 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
2574 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2575 // CHECK1:       .omp.final.done:
2576 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2577 // CHECK1:       omp.precond.end:
2578 // CHECK1-NEXT:    ret void
2579 //
2580 //
2581 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..23
2582 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2583 // CHECK1-NEXT:  entry:
2584 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2585 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2586 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2587 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2588 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2589 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2590 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2591 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2592 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2593 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2594 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2595 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2596 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2597 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2598 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2599 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2600 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2601 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2602 // CHECK1-NEXT:    [[I6:%.*]] = alloca i32, align 4
2603 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 8
2604 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2605 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2606 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2607 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2608 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2609 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2610 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2611 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2612 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2613 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2614 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2615 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2616 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2617 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2618 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2619 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2620 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2621 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2622 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2623 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2624 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2625 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2626 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2627 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2628 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2629 // CHECK1:       omp.precond.then:
2630 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2631 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2632 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2633 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2634 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2635 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2636 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2637 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2638 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2639 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2640 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2641 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
2642 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2643 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2644 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2645 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
2646 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
2647 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2648 // CHECK1:       omp.dispatch.cond:
2649 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2650 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
2651 // CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2652 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
2653 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2654 // CHECK1:       omp.dispatch.body:
2655 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2656 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
2657 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2658 // CHECK1:       omp.inner.for.cond:
2659 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2660 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !52
2661 // CHECK1-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
2662 // CHECK1-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2663 // CHECK1:       omp.inner.for.body:
2664 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2665 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
2666 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2667 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !52
2668 // CHECK1-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !52
2669 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2670 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
2671 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
2672 // CHECK1-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !52
2673 // CHECK1-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !52
2674 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2675 // CHECK1-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
2676 // CHECK1-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
2677 // CHECK1-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !52
2678 // CHECK1-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
2679 // CHECK1-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !52
2680 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2681 // CHECK1-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
2682 // CHECK1-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
2683 // CHECK1-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !52
2684 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
2685 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 8, !llvm.access.group !52
2686 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
2687 // CHECK1-NEXT:    store i32* [[I6]], i32** [[TMP31]], align 8, !llvm.access.group !52
2688 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
2689 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 8, !llvm.access.group !52
2690 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
2691 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 8, !llvm.access.group !52
2692 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !52
2693 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2694 // CHECK1:       omp.body.continue:
2695 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2696 // CHECK1:       omp.inner.for.inc:
2697 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2698 // CHECK1-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], 1
2699 // CHECK1-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2700 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
2701 // CHECK1:       omp.inner.for.end:
2702 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2703 // CHECK1:       omp.dispatch.inc:
2704 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2705 // CHECK1:       omp.dispatch.end:
2706 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2707 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
2708 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2709 // CHECK1:       .omp.final.then:
2710 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2711 // CHECK1-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP37]], 0
2712 // CHECK1-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
2713 // CHECK1-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
2714 // CHECK1-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
2715 // CHECK1-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
2716 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2717 // CHECK1:       .omp.final.done:
2718 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2719 // CHECK1:       omp.precond.end:
2720 // CHECK1-NEXT:    ret void
2721 //
2722 //
2723 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2724 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
2725 // CHECK1-NEXT:  entry:
2726 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
2727 // CHECK1-NEXT:    ret void
2728 //
2729 //
2730 // CHECK2-LABEL: define {{[^@]+}}@main
2731 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
2732 // CHECK2-NEXT:  entry:
2733 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2734 // CHECK2-NEXT:    [[A:%.*]] = alloca double*, align 8
2735 // CHECK2-NEXT:    [[B:%.*]] = alloca double*, align 8
2736 // CHECK2-NEXT:    [[C:%.*]] = alloca double*, align 8
2737 // CHECK2-NEXT:    [[N:%.*]] = alloca i32, align 4
2738 // CHECK2-NEXT:    [[CH:%.*]] = alloca i32, align 4
2739 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
2740 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2741 // CHECK2-NEXT:    store i32 10000, i32* [[N]], align 4
2742 // CHECK2-NEXT:    store i32 100, i32* [[CH]], align 4
2743 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
2744 // CHECK2-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
2745 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
2746 // CHECK2-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
2747 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
2748 // CHECK2-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
2749 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
2750 // CHECK2-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
2751 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
2752 // CHECK2-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
2753 // CHECK2-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
2754 // CHECK2-NEXT:    ret i32 0
2755 //
2756 //
2757 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
2758 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
2759 // CHECK2-NEXT:  entry:
2760 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2761 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2762 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2763 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2764 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2765 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2766 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2767 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2768 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2769 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2770 // CHECK2-NEXT:    ret void
2771 //
2772 //
2773 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
2774 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2775 // CHECK2-NEXT:  entry:
2776 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2777 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2778 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2779 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2780 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2781 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2782 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2783 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2784 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2785 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2786 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2787 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2788 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2789 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2790 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2791 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
2792 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2793 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2794 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2795 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2796 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2797 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2798 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2799 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2800 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2801 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2802 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2803 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2804 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2805 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2806 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2807 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2808 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2809 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
2810 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2811 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2812 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2813 // CHECK2:       omp.precond.then:
2814 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2815 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2816 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2817 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2818 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2819 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2820 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2821 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2822 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2823 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2824 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2825 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2826 // CHECK2:       cond.true:
2827 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2828 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2829 // CHECK2:       cond.false:
2830 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2831 // CHECK2-NEXT:    br label [[COND_END]]
2832 // CHECK2:       cond.end:
2833 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2834 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2835 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2836 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2837 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2838 // CHECK2:       omp.inner.for.cond:
2839 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2840 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
2841 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2842 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2843 // CHECK2:       omp.inner.for.body:
2844 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !10
2845 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
2846 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
2847 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2848 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !10
2849 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2850 // CHECK2:       omp.inner.for.inc:
2851 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2852 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !10
2853 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2854 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2855 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
2856 // CHECK2:       omp.inner.for.end:
2857 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2858 // CHECK2:       omp.loop.exit:
2859 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2860 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
2861 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
2862 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2863 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2864 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2865 // CHECK2:       .omp.final.then:
2866 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2867 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
2868 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2869 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2870 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2871 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
2872 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2873 // CHECK2:       .omp.final.done:
2874 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
2875 // CHECK2:       omp.precond.end:
2876 // CHECK2-NEXT:    ret void
2877 //
2878 //
2879 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
2880 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2881 // CHECK2-NEXT:  entry:
2882 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2883 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2884 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2885 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2886 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2887 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2888 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2889 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2890 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2891 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2892 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2893 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2894 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2895 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2896 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2897 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2898 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2899 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
2900 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
2901 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2902 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2903 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2904 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2905 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2906 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2907 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2908 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2909 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2910 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2911 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2912 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2913 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2914 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2915 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2916 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2917 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2918 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2919 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2920 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
2921 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2922 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2923 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2924 // CHECK2:       omp.precond.then:
2925 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2926 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2927 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2928 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2929 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
2930 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2931 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
2932 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2933 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
2934 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2935 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2936 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2937 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2938 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2939 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2940 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2941 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2942 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2943 // CHECK2:       cond.true:
2944 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2945 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2946 // CHECK2:       cond.false:
2947 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2948 // CHECK2-NEXT:    br label [[COND_END]]
2949 // CHECK2:       cond.end:
2950 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2951 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2952 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2953 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2954 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2955 // CHECK2:       omp.inner.for.cond:
2956 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2957 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
2958 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2959 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2960 // CHECK2:       omp.inner.for.body:
2961 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2962 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
2963 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2964 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !14
2965 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !14
2966 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
2967 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
2968 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
2969 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !14
2970 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !14
2971 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
2972 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
2973 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
2974 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !14
2975 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
2976 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !14
2977 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
2978 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
2979 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
2980 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !14
2981 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2982 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !14
2983 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
2984 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !14
2985 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
2986 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !14
2987 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
2988 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !14
2989 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !14
2990 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2991 // CHECK2:       omp.body.continue:
2992 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2993 // CHECK2:       omp.inner.for.inc:
2994 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2995 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
2996 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2997 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
2998 // CHECK2:       omp.inner.for.end:
2999 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3000 // CHECK2:       omp.loop.exit:
3001 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3002 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3003 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3004 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3005 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3006 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3007 // CHECK2:       .omp.final.then:
3008 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3009 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3010 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3011 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3012 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3013 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3014 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3015 // CHECK2:       .omp.final.done:
3016 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3017 // CHECK2:       omp.precond.end:
3018 // CHECK2-NEXT:    ret void
3019 //
3020 //
3021 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
3022 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3023 // CHECK2-NEXT:  entry:
3024 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3025 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3026 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3027 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3028 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3029 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3030 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3031 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3032 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3033 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3034 // CHECK2-NEXT:    ret void
3035 //
3036 //
3037 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
3038 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3039 // CHECK2-NEXT:  entry:
3040 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3041 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3042 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3043 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3044 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3045 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3046 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3047 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3048 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3049 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3050 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3051 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3052 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3053 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3054 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3055 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
3056 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3057 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3058 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3059 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3060 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3061 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3062 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3063 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3064 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3065 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3066 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3067 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3068 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3069 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3070 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3071 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3072 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3073 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3074 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3075 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3076 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3077 // CHECK2:       omp.precond.then:
3078 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3079 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3080 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3081 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3082 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3083 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3084 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3085 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3086 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3087 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3088 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3089 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3090 // CHECK2:       cond.true:
3091 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3092 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3093 // CHECK2:       cond.false:
3094 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3095 // CHECK2-NEXT:    br label [[COND_END]]
3096 // CHECK2:       cond.end:
3097 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3098 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3099 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3100 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3101 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3102 // CHECK2:       omp.inner.for.cond:
3103 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
3104 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
3105 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3106 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3107 // CHECK2:       omp.inner.for.body:
3108 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !19
3109 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
3110 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
3111 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3112 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !19
3113 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3114 // CHECK2:       omp.inner.for.inc:
3115 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
3116 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !19
3117 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
3118 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
3119 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
3120 // CHECK2:       omp.inner.for.end:
3121 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3122 // CHECK2:       omp.loop.exit:
3123 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3124 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
3125 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
3126 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3127 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3128 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3129 // CHECK2:       .omp.final.then:
3130 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3131 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
3132 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3133 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3134 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3135 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
3136 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3137 // CHECK2:       .omp.final.done:
3138 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3139 // CHECK2:       omp.precond.end:
3140 // CHECK2-NEXT:    ret void
3141 //
3142 //
3143 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
3144 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3145 // CHECK2-NEXT:  entry:
3146 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3147 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3148 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3149 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3150 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3151 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3152 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3153 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3154 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3155 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3156 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3157 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3158 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3159 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3160 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3161 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3162 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3163 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3164 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
3165 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3166 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3167 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3168 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3169 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3170 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3171 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3172 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3173 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3174 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3175 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3176 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3177 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3178 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3179 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3180 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3181 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3182 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3183 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3184 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3185 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3186 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3187 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3188 // CHECK2:       omp.precond.then:
3189 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3190 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3191 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3192 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3193 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
3194 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3195 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
3196 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
3197 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
3198 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3199 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3200 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3201 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3202 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3203 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3204 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3205 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3206 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3207 // CHECK2:       cond.true:
3208 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3209 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3210 // CHECK2:       cond.false:
3211 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3212 // CHECK2-NEXT:    br label [[COND_END]]
3213 // CHECK2:       cond.end:
3214 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3215 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3216 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3217 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3218 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3219 // CHECK2:       omp.inner.for.cond:
3220 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3221 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
3222 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3223 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3224 // CHECK2:       omp.inner.for.body:
3225 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3226 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3227 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3228 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !22
3229 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !22
3230 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
3231 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
3232 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
3233 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !22
3234 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !22
3235 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
3236 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
3237 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
3238 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !22
3239 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
3240 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !22
3241 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
3242 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
3243 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
3244 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !22
3245 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
3246 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !22
3247 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
3248 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !22
3249 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
3250 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !22
3251 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
3252 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !22
3253 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !22
3254 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3255 // CHECK2:       omp.body.continue:
3256 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3257 // CHECK2:       omp.inner.for.inc:
3258 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3259 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
3260 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3261 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
3262 // CHECK2:       omp.inner.for.end:
3263 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3264 // CHECK2:       omp.loop.exit:
3265 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3266 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3267 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3268 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3269 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3270 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3271 // CHECK2:       .omp.final.then:
3272 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3273 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3274 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3275 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3276 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3277 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3278 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3279 // CHECK2:       .omp.final.done:
3280 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3281 // CHECK2:       omp.precond.end:
3282 // CHECK2-NEXT:    ret void
3283 //
3284 //
3285 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
3286 // CHECK2-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3287 // CHECK2-NEXT:  entry:
3288 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
3289 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3290 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3291 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3292 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3293 // CHECK2-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
3294 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3295 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3296 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3297 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3298 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
3299 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3300 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3301 // CHECK2-NEXT:    ret void
3302 //
3303 //
3304 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
3305 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3306 // CHECK2-NEXT:  entry:
3307 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3308 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3309 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
3310 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3311 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3312 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3313 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3314 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3315 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3316 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3317 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3318 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3319 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3320 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3321 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3322 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3323 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
3324 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3325 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3326 // CHECK2-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
3327 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3328 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3329 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3330 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3331 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
3332 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3333 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
3334 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
3335 // CHECK2-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
3336 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
3337 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3338 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3339 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
3340 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3341 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3342 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3343 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3344 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3345 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
3346 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3347 // CHECK2:       omp.precond.then:
3348 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3349 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3350 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
3351 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3352 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3353 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
3354 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3355 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3356 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
3357 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3358 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3359 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3360 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3361 // CHECK2:       cond.true:
3362 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3363 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3364 // CHECK2:       cond.false:
3365 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3366 // CHECK2-NEXT:    br label [[COND_END]]
3367 // CHECK2:       cond.end:
3368 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3369 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3370 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3371 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3372 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3373 // CHECK2:       omp.inner.for.cond:
3374 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3375 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
3376 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
3377 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
3378 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3379 // CHECK2:       omp.inner.for.body:
3380 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3381 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3382 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3383 // CHECK2-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
3384 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !25
3385 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3386 // CHECK2:       omp.inner.for.inc:
3387 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3388 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
3389 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
3390 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3391 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3392 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
3393 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
3394 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3395 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3396 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
3397 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
3398 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3399 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3400 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
3401 // CHECK2-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
3402 // CHECK2-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
3403 // CHECK2:       cond.true10:
3404 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
3405 // CHECK2-NEXT:    br label [[COND_END12:%.*]]
3406 // CHECK2:       cond.false11:
3407 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3408 // CHECK2-NEXT:    br label [[COND_END12]]
3409 // CHECK2:       cond.end12:
3410 // CHECK2-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
3411 // CHECK2-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3412 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3413 // CHECK2-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3414 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
3415 // CHECK2:       omp.inner.for.end:
3416 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3417 // CHECK2:       omp.loop.exit:
3418 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3419 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
3420 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
3421 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3422 // CHECK2-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
3423 // CHECK2-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3424 // CHECK2:       .omp.final.then:
3425 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3426 // CHECK2-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
3427 // CHECK2-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
3428 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
3429 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
3430 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
3431 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3432 // CHECK2:       .omp.final.done:
3433 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3434 // CHECK2:       omp.precond.end:
3435 // CHECK2-NEXT:    ret void
3436 //
3437 //
3438 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
3439 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3440 // CHECK2-NEXT:  entry:
3441 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3442 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3443 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3444 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3445 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3446 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3447 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3448 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3449 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3450 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3451 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3452 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3453 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3454 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3455 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3456 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3457 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3458 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3459 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 8
3460 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3461 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3462 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3463 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3464 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3465 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3466 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3467 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3468 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3469 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3470 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3471 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3472 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3473 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3474 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3475 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3476 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3477 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3478 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3479 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3480 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3481 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3482 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3483 // CHECK2:       omp.precond.then:
3484 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3485 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3486 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3487 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3488 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
3489 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3490 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
3491 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
3492 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
3493 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3494 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3495 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3496 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3497 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3498 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3499 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3500 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3501 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3502 // CHECK2:       cond.true:
3503 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3504 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3505 // CHECK2:       cond.false:
3506 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3507 // CHECK2-NEXT:    br label [[COND_END]]
3508 // CHECK2:       cond.end:
3509 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3510 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3511 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3512 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3513 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3514 // CHECK2:       omp.inner.for.cond:
3515 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3516 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
3517 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3518 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3519 // CHECK2:       omp.inner.for.body:
3520 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3521 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3522 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3523 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !28
3524 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !28
3525 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
3526 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
3527 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
3528 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !28
3529 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !28
3530 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
3531 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
3532 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
3533 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !28
3534 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
3535 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !28
3536 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
3537 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
3538 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
3539 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !28
3540 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
3541 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !28
3542 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
3543 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !28
3544 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
3545 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !28
3546 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
3547 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !28
3548 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !28
3549 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3550 // CHECK2:       omp.body.continue:
3551 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3552 // CHECK2:       omp.inner.for.inc:
3553 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3554 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
3555 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3556 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
3557 // CHECK2:       omp.inner.for.end:
3558 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3559 // CHECK2:       omp.loop.exit:
3560 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3561 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3562 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3563 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3564 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3565 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3566 // CHECK2:       .omp.final.then:
3567 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3568 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3569 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3570 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3571 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3572 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3573 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3574 // CHECK2:       .omp.final.done:
3575 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3576 // CHECK2:       omp.precond.end:
3577 // CHECK2-NEXT:    ret void
3578 //
3579 //
3580 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
3581 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3582 // CHECK2-NEXT:  entry:
3583 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3584 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3585 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3586 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3587 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3588 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3589 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3590 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3591 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3592 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3593 // CHECK2-NEXT:    ret void
3594 //
3595 //
3596 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
3597 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3598 // CHECK2-NEXT:  entry:
3599 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3600 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3601 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3602 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3603 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3604 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3605 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3606 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3607 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3608 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3609 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3610 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3611 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3612 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3613 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3614 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
3615 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3616 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3617 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3618 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3619 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3620 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3621 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3622 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3623 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3624 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3625 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3626 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3627 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3628 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3629 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3630 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3631 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3632 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3633 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3634 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3635 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3636 // CHECK2:       omp.precond.then:
3637 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3638 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3639 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3640 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3641 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3642 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3643 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3644 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3645 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3646 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3647 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3648 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3649 // CHECK2:       cond.true:
3650 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3651 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3652 // CHECK2:       cond.false:
3653 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3654 // CHECK2-NEXT:    br label [[COND_END]]
3655 // CHECK2:       cond.end:
3656 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3657 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3658 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3659 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3660 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3661 // CHECK2:       omp.inner.for.cond:
3662 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
3663 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
3664 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3665 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3666 // CHECK2:       omp.inner.for.body:
3667 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31
3668 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
3669 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
3670 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3671 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !31
3672 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3673 // CHECK2:       omp.inner.for.inc:
3674 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
3675 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !31
3676 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
3677 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
3678 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
3679 // CHECK2:       omp.inner.for.end:
3680 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3681 // CHECK2:       omp.loop.exit:
3682 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3683 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
3684 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
3685 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3686 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3687 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3688 // CHECK2:       .omp.final.then:
3689 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3690 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
3691 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3692 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3693 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3694 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
3695 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3696 // CHECK2:       .omp.final.done:
3697 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3698 // CHECK2:       omp.precond.end:
3699 // CHECK2-NEXT:    ret void
3700 //
3701 //
3702 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11
3703 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3704 // CHECK2-NEXT:  entry:
3705 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3706 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3707 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3708 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3709 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3710 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3711 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3712 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3713 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3714 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3715 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3716 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3717 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3718 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3719 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3720 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3721 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3722 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3723 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 8
3724 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3725 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3726 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3727 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3728 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3729 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3730 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3731 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3732 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3733 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3734 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3735 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3736 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3737 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3738 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3739 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3740 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3741 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3742 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3743 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3744 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3745 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3746 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3747 // CHECK2:       omp.precond.then:
3748 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3749 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3750 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3751 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3752 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
3753 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3754 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
3755 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
3756 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
3757 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3758 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3759 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3760 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3761 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3762 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3763 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3764 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3765 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3766 // CHECK2:       cond.true:
3767 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3768 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3769 // CHECK2:       cond.false:
3770 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3771 // CHECK2-NEXT:    br label [[COND_END]]
3772 // CHECK2:       cond.end:
3773 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3774 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3775 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3776 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3777 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3778 // CHECK2:       omp.inner.for.cond:
3779 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3780 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
3781 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3782 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3783 // CHECK2:       omp.inner.for.body:
3784 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3785 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3786 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3787 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !34
3788 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !34
3789 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
3790 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
3791 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
3792 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !34
3793 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !34
3794 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
3795 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
3796 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
3797 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !34
3798 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
3799 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !34
3800 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
3801 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
3802 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
3803 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !34
3804 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
3805 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !34
3806 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
3807 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !34
3808 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
3809 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !34
3810 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
3811 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !34
3812 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !34
3813 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3814 // CHECK2:       omp.body.continue:
3815 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3816 // CHECK2:       omp.inner.for.inc:
3817 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3818 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
3819 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3820 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
3821 // CHECK2:       omp.inner.for.end:
3822 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3823 // CHECK2:       omp.loop.exit:
3824 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3825 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3826 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3827 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3828 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3829 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3830 // CHECK2:       .omp.final.then:
3831 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3832 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3833 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3834 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3835 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3836 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3837 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3838 // CHECK2:       .omp.final.done:
3839 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3840 // CHECK2:       omp.precond.end:
3841 // CHECK2-NEXT:    ret void
3842 //
3843 //
3844 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
3845 // CHECK2-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3846 // CHECK2-NEXT:  entry:
3847 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
3848 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3849 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3850 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3851 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3852 // CHECK2-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
3853 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3854 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3855 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3856 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3857 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
3858 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3859 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3860 // CHECK2-NEXT:    ret void
3861 //
3862 //
3863 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14
3864 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3865 // CHECK2-NEXT:  entry:
3866 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3867 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3868 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
3869 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3870 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3871 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3872 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3873 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3874 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3875 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3876 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3877 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3878 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3879 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3880 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3881 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3882 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3883 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3884 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
3885 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3886 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3887 // CHECK2-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
3888 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3889 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3890 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3891 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3892 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
3893 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3894 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
3895 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
3896 // CHECK2-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
3897 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
3898 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3899 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
3900 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3901 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3902 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
3903 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3904 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
3905 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3906 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3907 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3908 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
3909 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3910 // CHECK2:       omp.precond.then:
3911 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3912 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3913 // CHECK2-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
3914 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3915 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3916 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3917 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3918 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3919 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3920 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3921 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3922 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3923 // CHECK2:       cond.true:
3924 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3925 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3926 // CHECK2:       cond.false:
3927 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3928 // CHECK2-NEXT:    br label [[COND_END]]
3929 // CHECK2:       cond.end:
3930 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3931 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3932 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3933 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3934 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3935 // CHECK2:       omp.inner.for.cond:
3936 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
3937 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
3938 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3939 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3940 // CHECK2:       omp.inner.for.body:
3941 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37
3942 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3943 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
3944 // CHECK2-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
3945 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !37
3946 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
3947 // CHECK2-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !37
3948 // CHECK2-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !37
3949 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !37
3950 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3951 // CHECK2:       omp.inner.for.inc:
3952 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
3953 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !37
3954 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
3955 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
3956 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
3957 // CHECK2:       omp.inner.for.end:
3958 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3959 // CHECK2:       omp.loop.exit:
3960 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3961 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
3962 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
3963 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3964 // CHECK2-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
3965 // CHECK2-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3966 // CHECK2:       .omp.final.then:
3967 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3968 // CHECK2-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
3969 // CHECK2-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
3970 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
3971 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
3972 // CHECK2-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
3973 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3974 // CHECK2:       .omp.final.done:
3975 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3976 // CHECK2:       omp.precond.end:
3977 // CHECK2-NEXT:    ret void
3978 //
3979 //
3980 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15
3981 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
3982 // CHECK2-NEXT:  entry:
3983 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3984 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3985 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3986 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3987 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3988 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3989 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3990 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3991 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
3992 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3993 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3994 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3995 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3996 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3997 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3998 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3999 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4000 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4001 // CHECK2-NEXT:    [[I6:%.*]] = alloca i32, align 4
4002 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 8
4003 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4004 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4005 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4006 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4007 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4008 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4009 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4010 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4011 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
4012 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4013 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4014 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4015 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4016 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
4017 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4018 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4019 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4020 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4021 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4022 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4023 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4024 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4025 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4026 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4027 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4028 // CHECK2:       omp.precond.then:
4029 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4030 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4031 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4032 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4033 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
4034 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4035 // CHECK2-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
4036 // CHECK2-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
4037 // CHECK2-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
4038 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4039 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4040 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
4041 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4042 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
4043 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
4044 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4045 // CHECK2:       omp.dispatch.cond:
4046 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4047 // CHECK2-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4048 // CHECK2-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
4049 // CHECK2-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
4050 // CHECK2-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4051 // CHECK2:       cond.true:
4052 // CHECK2-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4053 // CHECK2-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
4054 // CHECK2-NEXT:    br label [[COND_END:%.*]]
4055 // CHECK2:       cond.false:
4056 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4057 // CHECK2-NEXT:    br label [[COND_END]]
4058 // CHECK2:       cond.end:
4059 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
4060 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4061 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4062 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4063 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4064 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4065 // CHECK2-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4066 // CHECK2-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4067 // CHECK2:       omp.dispatch.body:
4068 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4069 // CHECK2:       omp.inner.for.cond:
4070 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4071 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
4072 // CHECK2-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
4073 // CHECK2-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4074 // CHECK2:       omp.inner.for.body:
4075 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4076 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
4077 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4078 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !40
4079 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !40
4080 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
4081 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
4082 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
4083 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !40
4084 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !40
4085 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
4086 // CHECK2-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
4087 // CHECK2-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
4088 // CHECK2-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !40
4089 // CHECK2-NEXT:    [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
4090 // CHECK2-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !40
4091 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
4092 // CHECK2-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
4093 // CHECK2-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
4094 // CHECK2-NEXT:    store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !40
4095 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
4096 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 8, !llvm.access.group !40
4097 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
4098 // CHECK2-NEXT:    store i32* [[I6]], i32** [[TMP32]], align 8, !llvm.access.group !40
4099 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
4100 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 8, !llvm.access.group !40
4101 // CHECK2-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
4102 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 8, !llvm.access.group !40
4103 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !40
4104 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4105 // CHECK2:       omp.body.continue:
4106 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4107 // CHECK2:       omp.inner.for.inc:
4108 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4109 // CHECK2-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP35]], 1
4110 // CHECK2-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4111 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
4112 // CHECK2:       omp.inner.for.end:
4113 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4114 // CHECK2:       omp.dispatch.inc:
4115 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4116 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4117 // CHECK2-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
4118 // CHECK2-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
4119 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4120 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4121 // CHECK2-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
4122 // CHECK2-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
4123 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
4124 // CHECK2:       omp.dispatch.end:
4125 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4126 // CHECK2-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
4127 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
4128 // CHECK2-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4129 // CHECK2-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
4130 // CHECK2-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4131 // CHECK2:       .omp.final.then:
4132 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4133 // CHECK2-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP44]], 0
4134 // CHECK2-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
4135 // CHECK2-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
4136 // CHECK2-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
4137 // CHECK2-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
4138 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4139 // CHECK2:       .omp.final.done:
4140 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4141 // CHECK2:       omp.precond.end:
4142 // CHECK2-NEXT:    ret void
4143 //
4144 //
4145 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
4146 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
4147 // CHECK2-NEXT:  entry:
4148 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
4149 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
4150 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
4151 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
4152 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
4153 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
4154 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
4155 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
4156 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
4157 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4158 // CHECK2-NEXT:    ret void
4159 //
4160 //
4161 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..18
4162 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
4163 // CHECK2-NEXT:  entry:
4164 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4165 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4166 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4167 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4168 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4169 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4170 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4171 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4172 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4173 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4174 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4175 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4176 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4177 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4178 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4179 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
4180 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4181 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4182 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4183 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4184 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4185 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4186 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4187 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4188 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4189 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4190 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4191 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4192 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4193 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4194 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4195 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4196 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4197 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4198 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4199 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4200 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4201 // CHECK2:       omp.precond.then:
4202 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4203 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4204 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
4205 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4206 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4207 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4208 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4209 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4210 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4211 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4212 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
4213 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4214 // CHECK2:       cond.true:
4215 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4216 // CHECK2-NEXT:    br label [[COND_END:%.*]]
4217 // CHECK2:       cond.false:
4218 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4219 // CHECK2-NEXT:    br label [[COND_END]]
4220 // CHECK2:       cond.end:
4221 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
4222 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4223 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4224 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
4225 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4226 // CHECK2:       omp.inner.for.cond:
4227 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
4228 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
4229 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
4230 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4231 // CHECK2:       omp.inner.for.body:
4232 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !43
4233 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
4234 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
4235 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
4236 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !43
4237 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4238 // CHECK2:       omp.inner.for.inc:
4239 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
4240 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !43
4241 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
4242 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
4243 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
4244 // CHECK2:       omp.inner.for.end:
4245 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4246 // CHECK2:       omp.loop.exit:
4247 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4248 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
4249 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
4250 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4251 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
4252 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4253 // CHECK2:       .omp.final.then:
4254 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4255 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
4256 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
4257 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
4258 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
4259 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
4260 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4261 // CHECK2:       .omp.final.done:
4262 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4263 // CHECK2:       omp.precond.end:
4264 // CHECK2-NEXT:    ret void
4265 //
4266 //
4267 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..19
4268 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
4269 // CHECK2-NEXT:  entry:
4270 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4271 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4272 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4273 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4274 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4275 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4276 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4277 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4278 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4279 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4280 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4281 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4282 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4283 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4284 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4285 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4286 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4287 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
4288 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 8
4289 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4290 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4291 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4292 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4293 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4294 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4295 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4296 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4297 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4298 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4299 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4300 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4301 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4302 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4303 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4304 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4305 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4306 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4307 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4308 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4309 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4310 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4311 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4312 // CHECK2:       omp.precond.then:
4313 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4314 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4315 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4316 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4317 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
4318 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4319 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
4320 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
4321 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
4322 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4323 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4324 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4325 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4326 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4327 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
4328 // CHECK2-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
4329 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4330 // CHECK2:       omp.dispatch.cond:
4331 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4332 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
4333 // CHECK2-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4334 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
4335 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4336 // CHECK2:       omp.dispatch.body:
4337 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4338 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4339 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4340 // CHECK2:       omp.inner.for.cond:
4341 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4342 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !46
4343 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4344 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4345 // CHECK2:       omp.inner.for.body:
4346 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4347 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
4348 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4349 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !46
4350 // CHECK2-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !46
4351 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
4352 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
4353 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
4354 // CHECK2-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !46
4355 // CHECK2-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !46
4356 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
4357 // CHECK2-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
4358 // CHECK2-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
4359 // CHECK2-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !46
4360 // CHECK2-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
4361 // CHECK2-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !46
4362 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
4363 // CHECK2-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
4364 // CHECK2-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
4365 // CHECK2-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !46
4366 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
4367 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 8, !llvm.access.group !46
4368 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
4369 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP30]], align 8, !llvm.access.group !46
4370 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
4371 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 8, !llvm.access.group !46
4372 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
4373 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 8, !llvm.access.group !46
4374 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !46
4375 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4376 // CHECK2:       omp.body.continue:
4377 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4378 // CHECK2:       omp.inner.for.inc:
4379 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4380 // CHECK2-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP33]], 1
4381 // CHECK2-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4382 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
4383 // CHECK2:       omp.inner.for.end:
4384 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4385 // CHECK2:       omp.dispatch.inc:
4386 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
4387 // CHECK2:       omp.dispatch.end:
4388 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4389 // CHECK2-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
4390 // CHECK2-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4391 // CHECK2:       .omp.final.then:
4392 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4393 // CHECK2-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP36]], 0
4394 // CHECK2-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
4395 // CHECK2-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
4396 // CHECK2-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
4397 // CHECK2-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
4398 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4399 // CHECK2:       .omp.final.done:
4400 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4401 // CHECK2:       omp.precond.end:
4402 // CHECK2-NEXT:    ret void
4403 //
4404 //
4405 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
4406 // CHECK2-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
4407 // CHECK2-NEXT:  entry:
4408 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
4409 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
4410 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
4411 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
4412 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
4413 // CHECK2-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
4414 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
4415 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
4416 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
4417 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
4418 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
4419 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
4420 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4421 // CHECK2-NEXT:    ret void
4422 //
4423 //
4424 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..22
4425 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
4426 // CHECK2-NEXT:  entry:
4427 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4428 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4429 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
4430 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4431 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4432 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4433 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4434 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4435 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4436 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4437 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4438 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4439 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4440 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4441 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4442 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4443 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4444 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
4445 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
4446 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4447 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4448 // CHECK2-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
4449 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4450 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4451 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4452 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4453 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
4454 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4455 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
4456 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
4457 // CHECK2-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
4458 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
4459 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
4460 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
4461 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4462 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4463 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
4464 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4465 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4466 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4467 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4468 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4469 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
4470 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4471 // CHECK2:       omp.precond.then:
4472 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4473 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4474 // CHECK2-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
4475 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4476 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4477 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4478 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
4479 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4480 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4481 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4482 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
4483 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4484 // CHECK2:       cond.true:
4485 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4486 // CHECK2-NEXT:    br label [[COND_END:%.*]]
4487 // CHECK2:       cond.false:
4488 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4489 // CHECK2-NEXT:    br label [[COND_END]]
4490 // CHECK2:       cond.end:
4491 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
4492 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4493 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4494 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
4495 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4496 // CHECK2:       omp.inner.for.cond:
4497 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
4498 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
4499 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
4500 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4501 // CHECK2:       omp.inner.for.body:
4502 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !49
4503 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
4504 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
4505 // CHECK2-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
4506 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !49
4507 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
4508 // CHECK2-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !49
4509 // CHECK2-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !49
4510 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !49
4511 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4512 // CHECK2:       omp.inner.for.inc:
4513 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
4514 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !49
4515 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
4516 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
4517 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
4518 // CHECK2:       omp.inner.for.end:
4519 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4520 // CHECK2:       omp.loop.exit:
4521 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4522 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
4523 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
4524 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4525 // CHECK2-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
4526 // CHECK2-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4527 // CHECK2:       .omp.final.then:
4528 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4529 // CHECK2-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
4530 // CHECK2-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
4531 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
4532 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
4533 // CHECK2-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
4534 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4535 // CHECK2:       .omp.final.done:
4536 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4537 // CHECK2:       omp.precond.end:
4538 // CHECK2-NEXT:    ret void
4539 //
4540 //
4541 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..23
4542 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
4543 // CHECK2-NEXT:  entry:
4544 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4545 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4546 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4547 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4548 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4549 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4550 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4551 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4552 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
4553 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4554 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4555 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4556 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4557 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4558 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4559 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4560 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4561 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4562 // CHECK2-NEXT:    [[I6:%.*]] = alloca i32, align 4
4563 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 8
4564 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4565 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4566 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4567 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4568 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4569 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4570 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4571 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4572 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
4573 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4574 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4575 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4576 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4577 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
4578 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4579 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4580 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4581 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4582 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4583 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4584 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4585 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4586 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4587 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4588 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4589 // CHECK2:       omp.precond.then:
4590 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4591 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4592 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4593 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4594 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
4595 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4596 // CHECK2-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
4597 // CHECK2-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
4598 // CHECK2-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
4599 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4600 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4601 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
4602 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4603 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4604 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4605 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
4606 // CHECK2-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
4607 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4608 // CHECK2:       omp.dispatch.cond:
4609 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4610 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
4611 // CHECK2-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4612 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
4613 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4614 // CHECK2:       omp.dispatch.body:
4615 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4616 // CHECK2-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
4617 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4618 // CHECK2:       omp.inner.for.cond:
4619 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4620 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !52
4621 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4622 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4623 // CHECK2:       omp.inner.for.body:
4624 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4625 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
4626 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4627 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !52
4628 // CHECK2-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !52
4629 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
4630 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
4631 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
4632 // CHECK2-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !52
4633 // CHECK2-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !52
4634 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
4635 // CHECK2-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
4636 // CHECK2-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
4637 // CHECK2-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !52
4638 // CHECK2-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
4639 // CHECK2-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !52
4640 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
4641 // CHECK2-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
4642 // CHECK2-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
4643 // CHECK2-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !52
4644 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
4645 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 8, !llvm.access.group !52
4646 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
4647 // CHECK2-NEXT:    store i32* [[I6]], i32** [[TMP31]], align 8, !llvm.access.group !52
4648 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
4649 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 8, !llvm.access.group !52
4650 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
4651 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 8, !llvm.access.group !52
4652 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !52
4653 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4654 // CHECK2:       omp.body.continue:
4655 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4656 // CHECK2:       omp.inner.for.inc:
4657 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4658 // CHECK2-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], 1
4659 // CHECK2-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4660 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
4661 // CHECK2:       omp.inner.for.end:
4662 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4663 // CHECK2:       omp.dispatch.inc:
4664 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
4665 // CHECK2:       omp.dispatch.end:
4666 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4667 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
4668 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4669 // CHECK2:       .omp.final.then:
4670 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4671 // CHECK2-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP37]], 0
4672 // CHECK2-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
4673 // CHECK2-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
4674 // CHECK2-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
4675 // CHECK2-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
4676 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4677 // CHECK2:       .omp.final.done:
4678 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4679 // CHECK2:       omp.precond.end:
4680 // CHECK2-NEXT:    ret void
4681 //
4682 //
4683 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
4684 // CHECK2-SAME: () #[[ATTR4:[0-9]+]] {
4685 // CHECK2-NEXT:  entry:
4686 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
4687 // CHECK2-NEXT:    ret void
4688 //
4689 //
4690 // CHECK3-LABEL: define {{[^@]+}}@main
4691 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
4692 // CHECK3-NEXT:  entry:
4693 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
4694 // CHECK3-NEXT:    [[A:%.*]] = alloca double*, align 4
4695 // CHECK3-NEXT:    [[B:%.*]] = alloca double*, align 4
4696 // CHECK3-NEXT:    [[C:%.*]] = alloca double*, align 4
4697 // CHECK3-NEXT:    [[N:%.*]] = alloca i32, align 4
4698 // CHECK3-NEXT:    [[CH:%.*]] = alloca i32, align 4
4699 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
4700 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
4701 // CHECK3-NEXT:    store i32 10000, i32* [[N]], align 4
4702 // CHECK3-NEXT:    store i32 100, i32* [[CH]], align 4
4703 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
4704 // CHECK3-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
4705 // CHECK3-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
4706 // CHECK3-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
4707 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
4708 // CHECK3-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
4709 // CHECK3-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
4710 // CHECK3-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
4711 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
4712 // CHECK3-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
4713 // CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
4714 // CHECK3-NEXT:    ret i32 0
4715 //
4716 //
4717 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
4718 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
4719 // CHECK3-NEXT:  entry:
4720 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4721 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
4722 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
4723 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
4724 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4725 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
4726 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
4727 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
4728 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4729 // CHECK3-NEXT:    ret void
4730 //
4731 //
4732 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
4733 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4734 // CHECK3-NEXT:  entry:
4735 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4736 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4737 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4738 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4739 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4740 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4741 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4742 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4743 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4744 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4745 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4746 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4747 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4748 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4749 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4750 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
4751 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4752 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4753 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4754 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4755 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4756 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4757 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4758 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4759 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4760 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4761 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4762 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4763 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4764 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4765 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4766 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4767 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4768 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4769 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4770 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4771 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4772 // CHECK3:       omp.precond.then:
4773 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4774 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4775 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
4776 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4777 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4778 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4779 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4780 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4781 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4782 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4783 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
4784 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4785 // CHECK3:       cond.true:
4786 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4787 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4788 // CHECK3:       cond.false:
4789 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4790 // CHECK3-NEXT:    br label [[COND_END]]
4791 // CHECK3:       cond.end:
4792 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
4793 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4794 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4795 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
4796 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4797 // CHECK3:       omp.inner.for.cond:
4798 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4799 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
4800 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
4801 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4802 // CHECK3:       omp.inner.for.body:
4803 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11
4804 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
4805 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !11
4806 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4807 // CHECK3:       omp.inner.for.inc:
4808 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4809 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11
4810 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
4811 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4812 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
4813 // CHECK3:       omp.inner.for.end:
4814 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4815 // CHECK3:       omp.loop.exit:
4816 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4817 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
4818 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
4819 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4820 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
4821 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4822 // CHECK3:       .omp.final.then:
4823 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4824 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
4825 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
4826 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
4827 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
4828 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
4829 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4830 // CHECK3:       .omp.final.done:
4831 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4832 // CHECK3:       omp.precond.end:
4833 // CHECK3-NEXT:    ret void
4834 //
4835 //
4836 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
4837 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4838 // CHECK3-NEXT:  entry:
4839 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4840 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4841 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4842 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4843 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4844 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4845 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4846 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4847 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4848 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4849 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4850 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4851 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4852 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4853 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4854 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4855 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4856 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
4857 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
4858 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4859 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4860 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4861 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4862 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4863 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4864 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4865 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4866 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4867 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4868 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4869 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4870 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4871 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4872 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4873 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4874 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4875 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4876 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4877 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4878 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4879 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4880 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4881 // CHECK3:       omp.precond.then:
4882 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4883 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4884 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4885 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4886 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4887 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
4888 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
4889 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4890 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4891 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4892 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
4893 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4894 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4895 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4896 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
4897 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4898 // CHECK3:       cond.true:
4899 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4900 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4901 // CHECK3:       cond.false:
4902 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4903 // CHECK3-NEXT:    br label [[COND_END]]
4904 // CHECK3:       cond.end:
4905 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
4906 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4907 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4908 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
4909 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4910 // CHECK3:       omp.inner.for.cond:
4911 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4912 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
4913 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
4914 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4915 // CHECK3:       omp.inner.for.body:
4916 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4917 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
4918 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4919 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15
4920 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !15
4921 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
4922 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
4923 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !15
4924 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !15
4925 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
4926 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
4927 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !15
4928 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
4929 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !15
4930 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
4931 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
4932 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !15
4933 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
4934 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !15
4935 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
4936 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !15
4937 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
4938 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !15
4939 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
4940 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !15
4941 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !15
4942 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4943 // CHECK3:       omp.body.continue:
4944 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4945 // CHECK3:       omp.inner.for.inc:
4946 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4947 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
4948 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4949 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
4950 // CHECK3:       omp.inner.for.end:
4951 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4952 // CHECK3:       omp.loop.exit:
4953 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4954 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
4955 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
4956 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4957 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
4958 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4959 // CHECK3:       .omp.final.then:
4960 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4961 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
4962 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
4963 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
4964 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
4965 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
4966 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4967 // CHECK3:       .omp.final.done:
4968 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4969 // CHECK3:       omp.precond.end:
4970 // CHECK3-NEXT:    ret void
4971 //
4972 //
4973 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
4974 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
4975 // CHECK3-NEXT:  entry:
4976 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4977 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
4978 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
4979 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
4980 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4981 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
4982 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
4983 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
4984 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4985 // CHECK3-NEXT:    ret void
4986 //
4987 //
4988 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
4989 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4990 // CHECK3-NEXT:  entry:
4991 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4992 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4993 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4994 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4995 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4996 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4997 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4998 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4999 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5000 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5001 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5002 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5003 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5004 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5005 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5006 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5007 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5008 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5009 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5010 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5011 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5012 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5013 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5014 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5015 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5016 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5017 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5018 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5019 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5020 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5021 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5022 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5023 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5024 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5025 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5026 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5027 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5028 // CHECK3:       omp.precond.then:
5029 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5030 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5031 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5032 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5033 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5034 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5035 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5036 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5037 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5038 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5039 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5040 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5041 // CHECK3:       cond.true:
5042 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5043 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5044 // CHECK3:       cond.false:
5045 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5046 // CHECK3-NEXT:    br label [[COND_END]]
5047 // CHECK3:       cond.end:
5048 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5049 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5050 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5051 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5052 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5053 // CHECK3:       omp.inner.for.cond:
5054 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5055 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
5056 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5057 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5058 // CHECK3:       omp.inner.for.body:
5059 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20
5060 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
5061 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !20
5062 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5063 // CHECK3:       omp.inner.for.inc:
5064 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5065 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20
5066 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
5067 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5068 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
5069 // CHECK3:       omp.inner.for.end:
5070 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5071 // CHECK3:       omp.loop.exit:
5072 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5073 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
5074 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
5075 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5076 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5077 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5078 // CHECK3:       .omp.final.then:
5079 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5080 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
5081 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5082 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5083 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5084 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
5085 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5086 // CHECK3:       .omp.final.done:
5087 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5088 // CHECK3:       omp.precond.end:
5089 // CHECK3-NEXT:    ret void
5090 //
5091 //
5092 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
5093 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5094 // CHECK3-NEXT:  entry:
5095 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5096 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5097 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5098 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5099 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5100 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5101 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5102 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5103 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5104 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5105 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5106 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5107 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5108 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5109 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5110 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5111 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5112 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5113 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 4
5114 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5115 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5116 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5117 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5118 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5119 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5120 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5121 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5122 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5123 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5124 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5125 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5126 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5127 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5128 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5129 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5130 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5131 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5132 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5133 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5134 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5135 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5136 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5137 // CHECK3:       omp.precond.then:
5138 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5139 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5140 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5141 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5142 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5143 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5144 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5145 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5146 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5147 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5148 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5149 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5150 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5151 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5152 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5153 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5154 // CHECK3:       cond.true:
5155 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5156 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5157 // CHECK3:       cond.false:
5158 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5159 // CHECK3-NEXT:    br label [[COND_END]]
5160 // CHECK3:       cond.end:
5161 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5162 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5163 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5164 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5165 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5166 // CHECK3:       omp.inner.for.cond:
5167 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5168 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
5169 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5170 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5171 // CHECK3:       omp.inner.for.body:
5172 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5173 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5174 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5175 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !23
5176 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !23
5177 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
5178 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
5179 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !23
5180 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !23
5181 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
5182 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
5183 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !23
5184 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
5185 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !23
5186 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
5187 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
5188 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !23
5189 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
5190 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !23
5191 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
5192 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !23
5193 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
5194 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !23
5195 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
5196 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !23
5197 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !23
5198 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5199 // CHECK3:       omp.body.continue:
5200 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5201 // CHECK3:       omp.inner.for.inc:
5202 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5203 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
5204 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5205 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
5206 // CHECK3:       omp.inner.for.end:
5207 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5208 // CHECK3:       omp.loop.exit:
5209 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5210 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
5211 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
5212 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5213 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
5214 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5215 // CHECK3:       .omp.final.then:
5216 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5217 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
5218 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
5219 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
5220 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
5221 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
5222 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5223 // CHECK3:       .omp.final.done:
5224 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5225 // CHECK3:       omp.precond.end:
5226 // CHECK3-NEXT:    ret void
5227 //
5228 //
5229 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
5230 // CHECK3-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
5231 // CHECK3-NEXT:  entry:
5232 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
5233 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5234 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
5235 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
5236 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
5237 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
5238 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5239 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
5240 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
5241 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
5242 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5243 // CHECK3-NEXT:    ret void
5244 //
5245 //
5246 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
5247 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5248 // CHECK3-NEXT:  entry:
5249 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5250 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5251 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
5252 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5253 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5254 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5255 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5256 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5257 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5258 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5259 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5260 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5261 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5262 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5263 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5264 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5265 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5266 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5267 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5268 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
5269 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5270 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5271 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5272 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5273 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
5274 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5275 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
5276 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
5277 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
5278 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
5279 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
5280 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5281 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
5282 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5283 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5284 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5285 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5286 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5287 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
5288 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5289 // CHECK3:       omp.precond.then:
5290 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5291 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5292 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
5293 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5294 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5295 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
5296 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5297 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5298 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
5299 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5300 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5301 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5302 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5303 // CHECK3:       cond.true:
5304 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5305 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5306 // CHECK3:       cond.false:
5307 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5308 // CHECK3-NEXT:    br label [[COND_END]]
5309 // CHECK3:       cond.end:
5310 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5311 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5312 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5313 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5314 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5315 // CHECK3:       omp.inner.for.cond:
5316 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5317 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
5318 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
5319 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
5320 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5321 // CHECK3:       omp.inner.for.body:
5322 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5323 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5324 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !26
5325 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5326 // CHECK3:       omp.inner.for.inc:
5327 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5328 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5329 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
5330 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5331 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5332 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5333 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5334 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5335 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5336 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5337 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
5338 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5339 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5340 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
5341 // CHECK3-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
5342 // CHECK3-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
5343 // CHECK3:       cond.true10:
5344 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
5345 // CHECK3-NEXT:    br label [[COND_END12:%.*]]
5346 // CHECK3:       cond.false11:
5347 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5348 // CHECK3-NEXT:    br label [[COND_END12]]
5349 // CHECK3:       cond.end12:
5350 // CHECK3-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
5351 // CHECK3-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5352 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5353 // CHECK3-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5354 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
5355 // CHECK3:       omp.inner.for.end:
5356 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5357 // CHECK3:       omp.loop.exit:
5358 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5359 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
5360 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
5361 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5362 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
5363 // CHECK3-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5364 // CHECK3:       .omp.final.then:
5365 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5366 // CHECK3-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
5367 // CHECK3-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
5368 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
5369 // CHECK3-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
5370 // CHECK3-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
5371 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5372 // CHECK3:       .omp.final.done:
5373 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5374 // CHECK3:       omp.precond.end:
5375 // CHECK3-NEXT:    ret void
5376 //
5377 //
5378 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
5379 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5380 // CHECK3-NEXT:  entry:
5381 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5382 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5383 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5384 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5385 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5386 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5387 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5388 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5389 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5390 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5391 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5392 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5393 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5394 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5395 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5396 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5397 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5398 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5399 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 4
5400 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5401 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5402 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5403 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5404 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5405 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5406 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5407 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5408 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5409 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5410 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5411 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5412 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5413 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5414 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5415 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5416 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5417 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5418 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5419 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5420 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5421 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5422 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5423 // CHECK3:       omp.precond.then:
5424 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5425 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5426 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5427 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5428 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5429 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5430 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5431 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5432 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5433 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5434 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5435 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5436 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5437 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5438 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5439 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5440 // CHECK3:       cond.true:
5441 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5442 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5443 // CHECK3:       cond.false:
5444 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5445 // CHECK3-NEXT:    br label [[COND_END]]
5446 // CHECK3:       cond.end:
5447 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5448 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5449 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5450 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5451 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5452 // CHECK3:       omp.inner.for.cond:
5453 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5454 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
5455 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5456 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5457 // CHECK3:       omp.inner.for.body:
5458 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5459 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5460 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5461 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !29
5462 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !29
5463 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
5464 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
5465 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !29
5466 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !29
5467 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
5468 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
5469 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !29
5470 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
5471 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !29
5472 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
5473 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
5474 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !29
5475 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
5476 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !29
5477 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
5478 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !29
5479 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
5480 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !29
5481 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
5482 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !29
5483 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !29
5484 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5485 // CHECK3:       omp.body.continue:
5486 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5487 // CHECK3:       omp.inner.for.inc:
5488 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5489 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
5490 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5491 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
5492 // CHECK3:       omp.inner.for.end:
5493 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5494 // CHECK3:       omp.loop.exit:
5495 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5496 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
5497 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
5498 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5499 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
5500 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5501 // CHECK3:       .omp.final.then:
5502 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5503 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
5504 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
5505 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
5506 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
5507 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
5508 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5509 // CHECK3:       .omp.final.done:
5510 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5511 // CHECK3:       omp.precond.end:
5512 // CHECK3-NEXT:    ret void
5513 //
5514 //
5515 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
5516 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
5517 // CHECK3-NEXT:  entry:
5518 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5519 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
5520 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
5521 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
5522 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5523 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
5524 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
5525 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
5526 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5527 // CHECK3-NEXT:    ret void
5528 //
5529 //
5530 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
5531 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5532 // CHECK3-NEXT:  entry:
5533 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5534 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5535 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5536 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5537 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5538 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5539 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5540 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5541 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5542 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5543 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5544 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5545 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5546 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5547 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5548 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5549 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5550 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5551 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5552 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5553 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5554 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5555 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5556 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5557 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5558 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5559 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5560 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5561 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5562 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5563 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5564 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5565 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5566 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5567 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5568 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5569 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5570 // CHECK3:       omp.precond.then:
5571 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5572 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5573 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5574 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5575 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5576 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5577 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5578 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5579 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5580 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5581 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5582 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5583 // CHECK3:       cond.true:
5584 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5585 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5586 // CHECK3:       cond.false:
5587 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5588 // CHECK3-NEXT:    br label [[COND_END]]
5589 // CHECK3:       cond.end:
5590 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5591 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5592 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5593 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5594 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5595 // CHECK3:       omp.inner.for.cond:
5596 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5597 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5598 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5599 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5600 // CHECK3:       omp.inner.for.body:
5601 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5602 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5603 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !32
5604 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5605 // CHECK3:       omp.inner.for.inc:
5606 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5607 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5608 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
5609 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5610 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
5611 // CHECK3:       omp.inner.for.end:
5612 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5613 // CHECK3:       omp.loop.exit:
5614 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5615 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
5616 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
5617 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5618 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5619 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5620 // CHECK3:       .omp.final.then:
5621 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5622 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
5623 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5624 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5625 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5626 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
5627 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5628 // CHECK3:       .omp.final.done:
5629 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5630 // CHECK3:       omp.precond.end:
5631 // CHECK3-NEXT:    ret void
5632 //
5633 //
5634 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
5635 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5636 // CHECK3-NEXT:  entry:
5637 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5638 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5639 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5640 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5641 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5642 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5643 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5644 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5645 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5646 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5647 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5648 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5649 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5650 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5651 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5652 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5653 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5654 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5655 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 4
5656 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5657 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5658 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5659 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5660 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5661 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5662 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5663 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5664 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5665 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5666 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5667 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5668 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5669 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5670 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5671 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5672 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5673 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5674 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5675 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5676 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5677 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5678 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5679 // CHECK3:       omp.precond.then:
5680 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5681 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5682 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5683 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5684 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5685 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5686 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5687 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5688 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5689 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5690 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5691 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5692 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5693 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5694 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5695 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5696 // CHECK3:       cond.true:
5697 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5698 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5699 // CHECK3:       cond.false:
5700 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5701 // CHECK3-NEXT:    br label [[COND_END]]
5702 // CHECK3:       cond.end:
5703 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5704 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5705 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5706 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5707 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5708 // CHECK3:       omp.inner.for.cond:
5709 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5710 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
5711 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5712 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5713 // CHECK3:       omp.inner.for.body:
5714 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5715 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5716 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5717 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !35
5718 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !35
5719 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
5720 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
5721 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !35
5722 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !35
5723 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
5724 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
5725 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !35
5726 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
5727 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !35
5728 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
5729 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
5730 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !35
5731 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
5732 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !35
5733 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
5734 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !35
5735 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
5736 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !35
5737 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
5738 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !35
5739 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !35
5740 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5741 // CHECK3:       omp.body.continue:
5742 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5743 // CHECK3:       omp.inner.for.inc:
5744 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5745 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
5746 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5747 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
5748 // CHECK3:       omp.inner.for.end:
5749 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5750 // CHECK3:       omp.loop.exit:
5751 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5752 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
5753 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
5754 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5755 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
5756 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5757 // CHECK3:       .omp.final.then:
5758 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5759 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
5760 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
5761 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
5762 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
5763 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
5764 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5765 // CHECK3:       .omp.final.done:
5766 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5767 // CHECK3:       omp.precond.end:
5768 // CHECK3-NEXT:    ret void
5769 //
5770 //
5771 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
5772 // CHECK3-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
5773 // CHECK3-NEXT:  entry:
5774 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
5775 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5776 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
5777 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
5778 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
5779 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
5780 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5781 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
5782 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
5783 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
5784 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5785 // CHECK3-NEXT:    ret void
5786 //
5787 //
5788 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
5789 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5790 // CHECK3-NEXT:  entry:
5791 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5792 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5793 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
5794 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5795 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5796 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5797 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5798 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5799 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5800 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5801 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5802 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5803 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5804 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5805 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5806 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5807 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5808 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
5809 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
5810 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5811 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5812 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
5813 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5814 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5815 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5816 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5817 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
5818 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5819 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
5820 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
5821 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
5822 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
5823 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
5824 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
5825 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5826 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5827 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
5828 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5829 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
5830 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5831 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5832 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5833 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
5834 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5835 // CHECK3:       omp.precond.then:
5836 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5837 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5838 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
5839 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5840 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5841 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5842 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5843 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5844 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5845 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5846 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5847 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5848 // CHECK3:       cond.true:
5849 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5850 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5851 // CHECK3:       cond.false:
5852 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5853 // CHECK3-NEXT:    br label [[COND_END]]
5854 // CHECK3:       cond.end:
5855 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5856 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5857 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5858 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5859 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5860 // CHECK3:       omp.inner.for.cond:
5861 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5862 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
5863 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5864 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5865 // CHECK3:       omp.inner.for.body:
5866 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
5867 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
5868 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !38
5869 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
5870 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
5871 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !38
5872 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5873 // CHECK3:       omp.inner.for.inc:
5874 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5875 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
5876 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5877 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5878 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
5879 // CHECK3:       omp.inner.for.end:
5880 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5881 // CHECK3:       omp.loop.exit:
5882 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5883 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
5884 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
5885 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5886 // CHECK3-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
5887 // CHECK3-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5888 // CHECK3:       .omp.final.then:
5889 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5890 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
5891 // CHECK3-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
5892 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
5893 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
5894 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
5895 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5896 // CHECK3:       .omp.final.done:
5897 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5898 // CHECK3:       omp.precond.end:
5899 // CHECK3-NEXT:    ret void
5900 //
5901 //
5902 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15
5903 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
5904 // CHECK3-NEXT:  entry:
5905 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5906 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5907 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5908 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5909 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5910 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5911 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5912 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5913 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
5914 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5915 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5916 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5917 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5918 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5919 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5920 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5921 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5922 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5923 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
5924 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 4
5925 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5926 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5927 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5928 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5929 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5930 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5931 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5932 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5933 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5934 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5935 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5936 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5937 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5938 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5939 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5940 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5941 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5942 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5943 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
5944 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5945 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5946 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5947 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5948 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5949 // CHECK3:       omp.precond.then:
5950 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5951 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5952 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5953 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5954 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5955 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5956 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5957 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5958 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5959 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5960 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5961 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
5962 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
5963 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5964 // CHECK3:       omp.dispatch.cond:
5965 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5966 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5967 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
5968 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5969 // CHECK3:       cond.true:
5970 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5971 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5972 // CHECK3:       cond.false:
5973 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5974 // CHECK3-NEXT:    br label [[COND_END]]
5975 // CHECK3:       cond.end:
5976 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
5977 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5978 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5979 // CHECK3-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
5980 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5981 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5982 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
5983 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5984 // CHECK3:       omp.dispatch.body:
5985 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5986 // CHECK3:       omp.inner.for.cond:
5987 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
5988 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
5989 // CHECK3-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
5990 // CHECK3-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5991 // CHECK3:       omp.inner.for.body:
5992 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
5993 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
5994 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5995 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
5996 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !41
5997 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
5998 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
5999 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !41
6000 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !41
6001 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6002 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
6003 // CHECK3-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !41
6004 // CHECK3-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
6005 // CHECK3-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !41
6006 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6007 // CHECK3-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
6008 // CHECK3-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !41
6009 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
6010 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 4, !llvm.access.group !41
6011 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
6012 // CHECK3-NEXT:    store i32* [[I4]], i32** [[TMP32]], align 4, !llvm.access.group !41
6013 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
6014 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 4, !llvm.access.group !41
6015 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
6016 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 4, !llvm.access.group !41
6017 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !41
6018 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6019 // CHECK3:       omp.body.continue:
6020 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6021 // CHECK3:       omp.inner.for.inc:
6022 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6023 // CHECK3-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP35]], 1
6024 // CHECK3-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6025 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
6026 // CHECK3:       omp.inner.for.end:
6027 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6028 // CHECK3:       omp.dispatch.inc:
6029 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6030 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6031 // CHECK3-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
6032 // CHECK3-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
6033 // CHECK3-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6034 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6035 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
6036 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
6037 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
6038 // CHECK3:       omp.dispatch.end:
6039 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6040 // CHECK3-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
6041 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
6042 // CHECK3-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6043 // CHECK3-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
6044 // CHECK3-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6045 // CHECK3:       .omp.final.then:
6046 // CHECK3-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6047 // CHECK3-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP44]], 0
6048 // CHECK3-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
6049 // CHECK3-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
6050 // CHECK3-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
6051 // CHECK3-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
6052 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6053 // CHECK3:       .omp.final.done:
6054 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6055 // CHECK3:       omp.precond.end:
6056 // CHECK3-NEXT:    ret void
6057 //
6058 //
6059 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
6060 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
6061 // CHECK3-NEXT:  entry:
6062 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6063 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6064 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6065 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6066 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6067 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6068 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6069 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6070 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6071 // CHECK3-NEXT:    ret void
6072 //
6073 //
6074 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..18
6075 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6076 // CHECK3-NEXT:  entry:
6077 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6078 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6079 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6080 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6081 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6082 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6083 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6084 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6085 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6086 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6087 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6088 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6089 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6090 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6091 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6092 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
6093 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6094 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6095 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6096 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6097 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6098 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6099 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6100 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6101 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6102 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6103 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6104 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6105 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6106 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6107 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6108 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6109 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6110 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6111 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6112 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6113 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6114 // CHECK3:       omp.precond.then:
6115 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6116 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6117 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6118 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6119 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6120 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6121 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6122 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6123 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6124 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6125 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6126 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6127 // CHECK3:       cond.true:
6128 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6129 // CHECK3-NEXT:    br label [[COND_END:%.*]]
6130 // CHECK3:       cond.false:
6131 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6132 // CHECK3-NEXT:    br label [[COND_END]]
6133 // CHECK3:       cond.end:
6134 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6135 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6136 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6137 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6138 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6139 // CHECK3:       omp.inner.for.cond:
6140 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6141 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6142 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6143 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6144 // CHECK3:       omp.inner.for.body:
6145 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
6146 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6147 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !44
6148 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6149 // CHECK3:       omp.inner.for.inc:
6150 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6151 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
6152 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
6153 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6154 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
6155 // CHECK3:       omp.inner.for.end:
6156 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6157 // CHECK3:       omp.loop.exit:
6158 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6159 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
6160 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
6161 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6162 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6163 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6164 // CHECK3:       .omp.final.then:
6165 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6166 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
6167 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6168 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6169 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6170 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6171 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6172 // CHECK3:       .omp.final.done:
6173 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6174 // CHECK3:       omp.precond.end:
6175 // CHECK3-NEXT:    ret void
6176 //
6177 //
6178 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..19
6179 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6180 // CHECK3-NEXT:  entry:
6181 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6182 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6183 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6184 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6185 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6186 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6187 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6188 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6189 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6190 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6191 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6192 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6193 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6194 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6195 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6196 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6197 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6198 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
6199 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 4
6200 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6201 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6202 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6203 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6204 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6205 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6206 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6207 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6208 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6209 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6210 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6211 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6212 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6213 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6214 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6215 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6216 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6217 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6218 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6219 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6220 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6221 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6222 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6223 // CHECK3:       omp.precond.then:
6224 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6225 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6226 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6227 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6228 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6229 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
6230 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
6231 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6232 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6233 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6234 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6235 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6236 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
6237 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
6238 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6239 // CHECK3:       omp.dispatch.cond:
6240 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6241 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
6242 // CHECK3-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6243 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
6244 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6245 // CHECK3:       omp.dispatch.body:
6246 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6247 // CHECK3-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
6248 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6249 // CHECK3:       omp.inner.for.cond:
6250 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6251 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
6252 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6253 // CHECK3-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6254 // CHECK3:       omp.inner.for.body:
6255 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6256 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
6257 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6258 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !47
6259 // CHECK3-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !47
6260 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
6261 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
6262 // CHECK3-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !47
6263 // CHECK3-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !47
6264 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
6265 // CHECK3-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
6266 // CHECK3-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !47
6267 // CHECK3-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
6268 // CHECK3-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !47
6269 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
6270 // CHECK3-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
6271 // CHECK3-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !47
6272 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
6273 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 4, !llvm.access.group !47
6274 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
6275 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP30]], align 4, !llvm.access.group !47
6276 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
6277 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 4, !llvm.access.group !47
6278 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
6279 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 4, !llvm.access.group !47
6280 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !47
6281 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6282 // CHECK3:       omp.body.continue:
6283 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6284 // CHECK3:       omp.inner.for.inc:
6285 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6286 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], 1
6287 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6288 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
6289 // CHECK3:       omp.inner.for.end:
6290 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6291 // CHECK3:       omp.dispatch.inc:
6292 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
6293 // CHECK3:       omp.dispatch.end:
6294 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6295 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
6296 // CHECK3-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6297 // CHECK3:       .omp.final.then:
6298 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6299 // CHECK3-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP36]], 0
6300 // CHECK3-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
6301 // CHECK3-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
6302 // CHECK3-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
6303 // CHECK3-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
6304 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6305 // CHECK3:       .omp.final.done:
6306 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6307 // CHECK3:       omp.precond.end:
6308 // CHECK3-NEXT:    ret void
6309 //
6310 //
6311 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
6312 // CHECK3-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
6313 // CHECK3-NEXT:  entry:
6314 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
6315 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6316 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6317 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6318 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6319 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
6320 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6321 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6322 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6323 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6324 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6325 // CHECK3-NEXT:    ret void
6326 //
6327 //
6328 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..22
6329 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6330 // CHECK3-NEXT:  entry:
6331 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6332 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6333 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
6334 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6335 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6336 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6337 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6338 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6339 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6340 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6341 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6342 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6343 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6344 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6345 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6346 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6347 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6348 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
6349 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
6350 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6351 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6352 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
6353 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6354 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6355 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6356 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6357 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
6358 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6359 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
6360 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
6361 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
6362 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
6363 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
6364 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
6365 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6366 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6367 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
6368 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6369 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6370 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6371 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6372 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6373 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
6374 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6375 // CHECK3:       omp.precond.then:
6376 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6377 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6378 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
6379 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6380 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6381 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6382 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6383 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6384 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6385 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6386 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6387 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6388 // CHECK3:       cond.true:
6389 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6390 // CHECK3-NEXT:    br label [[COND_END:%.*]]
6391 // CHECK3:       cond.false:
6392 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6393 // CHECK3-NEXT:    br label [[COND_END]]
6394 // CHECK3:       cond.end:
6395 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6396 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6397 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6398 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6399 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6400 // CHECK3:       omp.inner.for.cond:
6401 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6402 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6403 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6404 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6405 // CHECK3:       omp.inner.for.body:
6406 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
6407 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6408 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !50
6409 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
6410 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
6411 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !50
6412 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6413 // CHECK3:       omp.inner.for.inc:
6414 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6415 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
6416 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
6417 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6418 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
6419 // CHECK3:       omp.inner.for.end:
6420 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6421 // CHECK3:       omp.loop.exit:
6422 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6423 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
6424 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
6425 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6426 // CHECK3-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
6427 // CHECK3-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6428 // CHECK3:       .omp.final.then:
6429 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6430 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
6431 // CHECK3-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6432 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
6433 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
6434 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
6435 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6436 // CHECK3:       .omp.final.done:
6437 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6438 // CHECK3:       omp.precond.end:
6439 // CHECK3-NEXT:    ret void
6440 //
6441 //
6442 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..23
6443 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
6444 // CHECK3-NEXT:  entry:
6445 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6446 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6447 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6448 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6449 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6450 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6451 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6452 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6453 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
6454 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6455 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6456 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6457 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6458 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6459 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6460 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6461 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6462 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6463 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
6464 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 4
6465 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6466 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6467 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6468 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6469 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6470 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6471 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6472 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6473 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
6474 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6475 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6476 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6477 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6478 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6479 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6480 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6481 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6482 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6483 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6484 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6485 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6486 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6487 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6488 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6489 // CHECK3:       omp.precond.then:
6490 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6491 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6492 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6493 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6494 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6495 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
6496 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
6497 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6498 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6499 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
6500 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6501 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6502 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6503 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
6504 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
6505 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6506 // CHECK3:       omp.dispatch.cond:
6507 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6508 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
6509 // CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6510 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
6511 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6512 // CHECK3:       omp.dispatch.body:
6513 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6514 // CHECK3-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
6515 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6516 // CHECK3:       omp.inner.for.cond:
6517 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6518 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
6519 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
6520 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6521 // CHECK3:       omp.inner.for.body:
6522 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6523 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
6524 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6525 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
6526 // CHECK3-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !53
6527 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6528 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
6529 // CHECK3-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !53
6530 // CHECK3-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !53
6531 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6532 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
6533 // CHECK3-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !53
6534 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
6535 // CHECK3-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !53
6536 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6537 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
6538 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !53
6539 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
6540 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 4, !llvm.access.group !53
6541 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
6542 // CHECK3-NEXT:    store i32* [[I4]], i32** [[TMP31]], align 4, !llvm.access.group !53
6543 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
6544 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 4, !llvm.access.group !53
6545 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
6546 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 4, !llvm.access.group !53
6547 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !53
6548 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6549 // CHECK3:       omp.body.continue:
6550 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6551 // CHECK3:       omp.inner.for.inc:
6552 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6553 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP34]], 1
6554 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6555 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
6556 // CHECK3:       omp.inner.for.end:
6557 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6558 // CHECK3:       omp.dispatch.inc:
6559 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
6560 // CHECK3:       omp.dispatch.end:
6561 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6562 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
6563 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6564 // CHECK3:       .omp.final.then:
6565 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6566 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
6567 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
6568 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
6569 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
6570 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
6571 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6572 // CHECK3:       .omp.final.done:
6573 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6574 // CHECK3:       omp.precond.end:
6575 // CHECK3-NEXT:    ret void
6576 //
6577 //
6578 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
6579 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
6580 // CHECK3-NEXT:  entry:
6581 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
6582 // CHECK3-NEXT:    ret void
6583 //
6584 //
6585 // CHECK4-LABEL: define {{[^@]+}}@main
6586 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
6587 // CHECK4-NEXT:  entry:
6588 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
6589 // CHECK4-NEXT:    [[A:%.*]] = alloca double*, align 4
6590 // CHECK4-NEXT:    [[B:%.*]] = alloca double*, align 4
6591 // CHECK4-NEXT:    [[C:%.*]] = alloca double*, align 4
6592 // CHECK4-NEXT:    [[N:%.*]] = alloca i32, align 4
6593 // CHECK4-NEXT:    [[CH:%.*]] = alloca i32, align 4
6594 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
6595 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
6596 // CHECK4-NEXT:    store i32 10000, i32* [[N]], align 4
6597 // CHECK4-NEXT:    store i32 100, i32* [[CH]], align 4
6598 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
6599 // CHECK4-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
6600 // CHECK4-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
6601 // CHECK4-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
6602 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
6603 // CHECK4-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
6604 // CHECK4-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
6605 // CHECK4-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
6606 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
6607 // CHECK4-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
6608 // CHECK4-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
6609 // CHECK4-NEXT:    ret i32 0
6610 //
6611 //
6612 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
6613 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
6614 // CHECK4-NEXT:  entry:
6615 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6616 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6617 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6618 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6619 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6620 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6621 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6622 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6623 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6624 // CHECK4-NEXT:    ret void
6625 //
6626 //
6627 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
6628 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6629 // CHECK4-NEXT:  entry:
6630 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6631 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6632 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6633 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6634 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6635 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6636 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6637 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6638 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6639 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6640 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6641 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6642 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6643 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6644 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6645 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
6646 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6647 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6648 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6649 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6650 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6651 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6652 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6653 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6654 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6655 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6656 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6657 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6658 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6659 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6660 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6661 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6662 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6663 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
6664 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6665 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6666 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6667 // CHECK4:       omp.precond.then:
6668 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6669 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6670 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6671 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6672 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6673 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6674 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6675 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6676 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6677 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6678 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6679 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6680 // CHECK4:       cond.true:
6681 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6682 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6683 // CHECK4:       cond.false:
6684 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6685 // CHECK4-NEXT:    br label [[COND_END]]
6686 // CHECK4:       cond.end:
6687 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6688 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6689 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6690 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6691 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6692 // CHECK4:       omp.inner.for.cond:
6693 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
6694 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
6695 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6696 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6697 // CHECK4:       omp.inner.for.body:
6698 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11
6699 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
6700 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !11
6701 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6702 // CHECK4:       omp.inner.for.inc:
6703 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
6704 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11
6705 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
6706 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
6707 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
6708 // CHECK4:       omp.inner.for.end:
6709 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6710 // CHECK4:       omp.loop.exit:
6711 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6712 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
6713 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
6714 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6715 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6716 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6717 // CHECK4:       .omp.final.then:
6718 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6719 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
6720 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6721 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6722 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6723 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6724 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6725 // CHECK4:       .omp.final.done:
6726 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
6727 // CHECK4:       omp.precond.end:
6728 // CHECK4-NEXT:    ret void
6729 //
6730 //
6731 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
6732 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6733 // CHECK4-NEXT:  entry:
6734 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6735 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6736 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6737 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6738 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6739 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6740 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6741 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6742 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6743 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6744 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6745 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6746 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6747 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6748 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6749 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6750 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6751 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
6752 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
6753 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6754 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6755 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6756 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6757 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6758 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6759 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6760 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6761 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6762 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6763 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6764 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6765 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6766 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6767 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6768 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6769 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6770 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6771 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6772 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
6773 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6774 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6775 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6776 // CHECK4:       omp.precond.then:
6777 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6778 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6779 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6780 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6781 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6782 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
6783 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
6784 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6785 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6786 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6787 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6788 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6789 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6790 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6791 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6792 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6793 // CHECK4:       cond.true:
6794 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6795 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6796 // CHECK4:       cond.false:
6797 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6798 // CHECK4-NEXT:    br label [[COND_END]]
6799 // CHECK4:       cond.end:
6800 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6801 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6802 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6803 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6804 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6805 // CHECK4:       omp.inner.for.cond:
6806 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6807 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
6808 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6809 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6810 // CHECK4:       omp.inner.for.body:
6811 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6812 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
6813 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6814 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15
6815 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !15
6816 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
6817 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
6818 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !15
6819 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !15
6820 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
6821 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
6822 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !15
6823 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
6824 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !15
6825 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
6826 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
6827 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !15
6828 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
6829 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !15
6830 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
6831 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !15
6832 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
6833 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !15
6834 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
6835 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !15
6836 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !15
6837 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6838 // CHECK4:       omp.body.continue:
6839 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6840 // CHECK4:       omp.inner.for.inc:
6841 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6842 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
6843 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6844 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
6845 // CHECK4:       omp.inner.for.end:
6846 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6847 // CHECK4:       omp.loop.exit:
6848 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6849 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
6850 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
6851 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6852 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
6853 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6854 // CHECK4:       .omp.final.then:
6855 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6856 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
6857 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
6858 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
6859 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
6860 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
6861 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6862 // CHECK4:       .omp.final.done:
6863 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
6864 // CHECK4:       omp.precond.end:
6865 // CHECK4-NEXT:    ret void
6866 //
6867 //
6868 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
6869 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
6870 // CHECK4-NEXT:  entry:
6871 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6872 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6873 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6874 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6875 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6876 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6877 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6878 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6879 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6880 // CHECK4-NEXT:    ret void
6881 //
6882 //
6883 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
6884 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6885 // CHECK4-NEXT:  entry:
6886 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6887 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6888 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6889 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6890 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6891 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6892 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6893 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6894 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6895 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6896 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6897 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6898 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6899 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6900 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6901 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
6902 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6903 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6904 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6905 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6906 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6907 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6908 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6909 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6910 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6911 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6912 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6913 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6914 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6915 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6916 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6917 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6918 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6919 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
6920 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6921 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6922 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6923 // CHECK4:       omp.precond.then:
6924 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6925 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6926 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6927 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6928 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6929 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6930 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6931 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6932 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6933 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6934 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6935 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6936 // CHECK4:       cond.true:
6937 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6938 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6939 // CHECK4:       cond.false:
6940 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6941 // CHECK4-NEXT:    br label [[COND_END]]
6942 // CHECK4:       cond.end:
6943 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6944 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6945 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6946 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6947 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6948 // CHECK4:       omp.inner.for.cond:
6949 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
6950 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
6951 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6952 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6953 // CHECK4:       omp.inner.for.body:
6954 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20
6955 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
6956 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !20
6957 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6958 // CHECK4:       omp.inner.for.inc:
6959 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
6960 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20
6961 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
6962 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
6963 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
6964 // CHECK4:       omp.inner.for.end:
6965 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6966 // CHECK4:       omp.loop.exit:
6967 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6968 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
6969 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
6970 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6971 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6972 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6973 // CHECK4:       .omp.final.then:
6974 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6975 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
6976 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6977 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6978 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6979 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6980 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6981 // CHECK4:       .omp.final.done:
6982 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
6983 // CHECK4:       omp.precond.end:
6984 // CHECK4-NEXT:    ret void
6985 //
6986 //
6987 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
6988 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6989 // CHECK4-NEXT:  entry:
6990 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6991 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6992 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6993 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6994 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6995 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6996 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6997 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6998 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6999 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7000 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7001 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7002 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7003 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7004 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7005 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7006 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7007 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7008 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 4
7009 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7010 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7011 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7012 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7013 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7014 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7015 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7016 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7017 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7018 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7019 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7020 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7021 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7022 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7023 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7024 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7025 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7026 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7027 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7028 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7029 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7030 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7031 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7032 // CHECK4:       omp.precond.then:
7033 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7034 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7035 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7036 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7037 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7038 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7039 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7040 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7041 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7042 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7043 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7044 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7045 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7046 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7047 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7048 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7049 // CHECK4:       cond.true:
7050 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7051 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7052 // CHECK4:       cond.false:
7053 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7054 // CHECK4-NEXT:    br label [[COND_END]]
7055 // CHECK4:       cond.end:
7056 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7057 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7058 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7059 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7060 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7061 // CHECK4:       omp.inner.for.cond:
7062 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7063 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
7064 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7065 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7066 // CHECK4:       omp.inner.for.body:
7067 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7068 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
7069 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7070 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !23
7071 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !23
7072 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
7073 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
7074 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !23
7075 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !23
7076 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
7077 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7078 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !23
7079 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
7080 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !23
7081 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
7082 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7083 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !23
7084 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
7085 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !23
7086 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
7087 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !23
7088 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
7089 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !23
7090 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
7091 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !23
7092 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !23
7093 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7094 // CHECK4:       omp.body.continue:
7095 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7096 // CHECK4:       omp.inner.for.inc:
7097 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7098 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
7099 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7100 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
7101 // CHECK4:       omp.inner.for.end:
7102 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7103 // CHECK4:       omp.loop.exit:
7104 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7105 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
7106 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
7107 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7108 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
7109 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7110 // CHECK4:       .omp.final.then:
7111 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7112 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
7113 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
7114 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
7115 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
7116 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
7117 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7118 // CHECK4:       .omp.final.done:
7119 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7120 // CHECK4:       omp.precond.end:
7121 // CHECK4-NEXT:    ret void
7122 //
7123 //
7124 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
7125 // CHECK4-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7126 // CHECK4-NEXT:  entry:
7127 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
7128 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7129 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7130 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7131 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7132 // CHECK4-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
7133 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7134 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7135 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7136 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7137 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7138 // CHECK4-NEXT:    ret void
7139 //
7140 //
7141 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..6
7142 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7143 // CHECK4-NEXT:  entry:
7144 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7145 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7146 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
7147 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7148 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7149 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7150 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7151 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7152 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7153 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7154 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7155 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7156 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7157 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7158 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7159 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7160 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7161 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7162 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7163 // CHECK4-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
7164 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7165 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7166 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7167 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7168 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
7169 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7170 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
7171 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
7172 // CHECK4-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
7173 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
7174 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
7175 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7176 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
7177 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7178 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7179 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7180 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7181 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7182 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
7183 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7184 // CHECK4:       omp.precond.then:
7185 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7186 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7187 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
7188 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7189 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7190 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
7191 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7192 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7193 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
7194 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7195 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7196 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7197 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7198 // CHECK4:       cond.true:
7199 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7200 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7201 // CHECK4:       cond.false:
7202 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7203 // CHECK4-NEXT:    br label [[COND_END]]
7204 // CHECK4:       cond.end:
7205 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7206 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7207 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7208 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7209 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7210 // CHECK4:       omp.inner.for.cond:
7211 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7212 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
7213 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
7214 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
7215 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7216 // CHECK4:       omp.inner.for.body:
7217 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7218 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7219 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !26
7220 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7221 // CHECK4:       omp.inner.for.inc:
7222 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7223 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
7224 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
7225 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7226 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7227 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
7228 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
7229 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7230 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7231 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
7232 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
7233 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7234 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7235 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
7236 // CHECK4-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
7237 // CHECK4-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
7238 // CHECK4:       cond.true10:
7239 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
7240 // CHECK4-NEXT:    br label [[COND_END12:%.*]]
7241 // CHECK4:       cond.false11:
7242 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7243 // CHECK4-NEXT:    br label [[COND_END12]]
7244 // CHECK4:       cond.end12:
7245 // CHECK4-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
7246 // CHECK4-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7247 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7248 // CHECK4-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7249 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
7250 // CHECK4:       omp.inner.for.end:
7251 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7252 // CHECK4:       omp.loop.exit:
7253 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7254 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
7255 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
7256 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7257 // CHECK4-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
7258 // CHECK4-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7259 // CHECK4:       .omp.final.then:
7260 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7261 // CHECK4-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
7262 // CHECK4-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
7263 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
7264 // CHECK4-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
7265 // CHECK4-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
7266 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7267 // CHECK4:       .omp.final.done:
7268 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7269 // CHECK4:       omp.precond.end:
7270 // CHECK4-NEXT:    ret void
7271 //
7272 //
7273 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
7274 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7275 // CHECK4-NEXT:  entry:
7276 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7277 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7278 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7279 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7280 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7281 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7282 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7283 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7284 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7285 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7286 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7287 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7288 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7289 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7290 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7291 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7292 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7293 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7294 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 4
7295 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7296 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7297 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7298 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7299 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7300 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7301 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7302 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7303 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7304 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7305 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7306 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7307 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7308 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7309 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7310 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7311 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7312 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7313 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7314 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7315 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7316 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7317 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7318 // CHECK4:       omp.precond.then:
7319 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7320 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7321 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7322 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7323 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7324 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7325 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7326 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7327 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7328 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7329 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7330 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7331 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7332 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7333 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7334 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7335 // CHECK4:       cond.true:
7336 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7337 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7338 // CHECK4:       cond.false:
7339 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7340 // CHECK4-NEXT:    br label [[COND_END]]
7341 // CHECK4:       cond.end:
7342 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7343 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7344 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7345 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7346 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7347 // CHECK4:       omp.inner.for.cond:
7348 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7349 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
7350 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7351 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7352 // CHECK4:       omp.inner.for.body:
7353 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7354 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
7355 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7356 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !29
7357 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !29
7358 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
7359 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
7360 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !29
7361 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !29
7362 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
7363 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7364 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !29
7365 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
7366 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !29
7367 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
7368 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7369 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !29
7370 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
7371 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !29
7372 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
7373 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !29
7374 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
7375 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !29
7376 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
7377 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !29
7378 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !29
7379 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7380 // CHECK4:       omp.body.continue:
7381 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7382 // CHECK4:       omp.inner.for.inc:
7383 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7384 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
7385 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7386 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
7387 // CHECK4:       omp.inner.for.end:
7388 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7389 // CHECK4:       omp.loop.exit:
7390 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7391 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
7392 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
7393 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7394 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
7395 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7396 // CHECK4:       .omp.final.then:
7397 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7398 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
7399 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
7400 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
7401 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
7402 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
7403 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7404 // CHECK4:       .omp.final.done:
7405 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7406 // CHECK4:       omp.precond.end:
7407 // CHECK4-NEXT:    ret void
7408 //
7409 //
7410 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
7411 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7412 // CHECK4-NEXT:  entry:
7413 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7414 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7415 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7416 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7417 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7418 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7419 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7420 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7421 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7422 // CHECK4-NEXT:    ret void
7423 //
7424 //
7425 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10
7426 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7427 // CHECK4-NEXT:  entry:
7428 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7429 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7430 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7431 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7432 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7433 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7434 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7435 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7436 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7437 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7438 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7439 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7440 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7441 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7442 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7443 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7444 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7445 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7446 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7447 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7448 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7449 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7450 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7451 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7452 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7453 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7454 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7455 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7456 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7457 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7458 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7459 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7460 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7461 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7462 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7463 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7464 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7465 // CHECK4:       omp.precond.then:
7466 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7467 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7468 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
7469 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7470 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7471 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7472 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
7473 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7474 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7475 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7476 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
7477 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7478 // CHECK4:       cond.true:
7479 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7480 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7481 // CHECK4:       cond.false:
7482 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7483 // CHECK4-NEXT:    br label [[COND_END]]
7484 // CHECK4:       cond.end:
7485 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
7486 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7487 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7488 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
7489 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7490 // CHECK4:       omp.inner.for.cond:
7491 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
7492 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
7493 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
7494 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7495 // CHECK4:       omp.inner.for.body:
7496 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
7497 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
7498 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !32
7499 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7500 // CHECK4:       omp.inner.for.inc:
7501 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
7502 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
7503 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
7504 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
7505 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
7506 // CHECK4:       omp.inner.for.end:
7507 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7508 // CHECK4:       omp.loop.exit:
7509 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7510 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
7511 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
7512 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7513 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
7514 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7515 // CHECK4:       .omp.final.then:
7516 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7517 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
7518 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
7519 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
7520 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
7521 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
7522 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7523 // CHECK4:       .omp.final.done:
7524 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7525 // CHECK4:       omp.precond.end:
7526 // CHECK4-NEXT:    ret void
7527 //
7528 //
7529 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11
7530 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7531 // CHECK4-NEXT:  entry:
7532 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7533 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7534 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7535 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7536 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7537 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7538 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7539 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7540 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7541 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7542 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7543 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7544 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7545 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7546 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7547 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7548 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7549 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7550 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 4
7551 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7552 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7553 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7554 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7555 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7556 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7557 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7558 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7559 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7560 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7561 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7562 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7563 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7564 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7565 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7566 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7567 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7568 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7569 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7570 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7571 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7572 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7573 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7574 // CHECK4:       omp.precond.then:
7575 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7576 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7577 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7578 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7579 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7580 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7581 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7582 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7583 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7584 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7585 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7586 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7587 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7588 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7589 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7590 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7591 // CHECK4:       cond.true:
7592 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7593 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7594 // CHECK4:       cond.false:
7595 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7596 // CHECK4-NEXT:    br label [[COND_END]]
7597 // CHECK4:       cond.end:
7598 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7599 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7600 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7601 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7602 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7603 // CHECK4:       omp.inner.for.cond:
7604 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7605 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
7606 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7607 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7608 // CHECK4:       omp.inner.for.body:
7609 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7610 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
7611 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7612 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !35
7613 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !35
7614 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
7615 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
7616 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !35
7617 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !35
7618 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
7619 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7620 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !35
7621 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
7622 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !35
7623 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
7624 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7625 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !35
7626 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
7627 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !35
7628 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
7629 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !35
7630 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
7631 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !35
7632 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
7633 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !35
7634 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !35
7635 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7636 // CHECK4:       omp.body.continue:
7637 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7638 // CHECK4:       omp.inner.for.inc:
7639 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7640 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
7641 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7642 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
7643 // CHECK4:       omp.inner.for.end:
7644 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7645 // CHECK4:       omp.loop.exit:
7646 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7647 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
7648 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
7649 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7650 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
7651 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7652 // CHECK4:       .omp.final.then:
7653 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7654 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
7655 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
7656 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
7657 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
7658 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
7659 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7660 // CHECK4:       .omp.final.done:
7661 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7662 // CHECK4:       omp.precond.end:
7663 // CHECK4-NEXT:    ret void
7664 //
7665 //
7666 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
7667 // CHECK4-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7668 // CHECK4-NEXT:  entry:
7669 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
7670 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7671 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7672 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7673 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7674 // CHECK4-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
7675 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7676 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7677 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7678 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7679 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7680 // CHECK4-NEXT:    ret void
7681 //
7682 //
7683 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14
7684 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7685 // CHECK4-NEXT:  entry:
7686 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7687 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7688 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
7689 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7690 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7691 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7692 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7693 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7694 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7695 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7696 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7697 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7698 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7699 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7700 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7701 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7702 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7703 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
7704 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
7705 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7706 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7707 // CHECK4-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
7708 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7709 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7710 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7711 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7712 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
7713 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7714 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
7715 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
7716 // CHECK4-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
7717 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
7718 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
7719 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
7720 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7721 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7722 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
7723 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7724 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
7725 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7726 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7727 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7728 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
7729 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7730 // CHECK4:       omp.precond.then:
7731 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7732 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7733 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
7734 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7735 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7736 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7737 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7738 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7739 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7740 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7741 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7742 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7743 // CHECK4:       cond.true:
7744 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7745 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7746 // CHECK4:       cond.false:
7747 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7748 // CHECK4-NEXT:    br label [[COND_END]]
7749 // CHECK4:       cond.end:
7750 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7751 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7752 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7753 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7754 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7755 // CHECK4:       omp.inner.for.cond:
7756 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
7757 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
7758 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7759 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7760 // CHECK4:       omp.inner.for.body:
7761 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
7762 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
7763 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !38
7764 // CHECK4-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
7765 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
7766 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !38
7767 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7768 // CHECK4:       omp.inner.for.inc:
7769 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
7770 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
7771 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
7772 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
7773 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
7774 // CHECK4:       omp.inner.for.end:
7775 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7776 // CHECK4:       omp.loop.exit:
7777 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7778 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
7779 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
7780 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7781 // CHECK4-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
7782 // CHECK4-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7783 // CHECK4:       .omp.final.then:
7784 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7785 // CHECK4-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
7786 // CHECK4-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
7787 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
7788 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
7789 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
7790 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7791 // CHECK4:       .omp.final.done:
7792 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7793 // CHECK4:       omp.precond.end:
7794 // CHECK4-NEXT:    ret void
7795 //
7796 //
7797 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15
7798 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
7799 // CHECK4-NEXT:  entry:
7800 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7801 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7802 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7803 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7804 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7805 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7806 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7807 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7808 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
7809 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7810 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7811 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7812 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7813 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7814 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7815 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7816 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7817 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7818 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
7819 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 4
7820 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7821 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7822 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7823 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7824 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7825 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7826 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7827 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7828 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
7829 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7830 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7831 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7832 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7833 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7834 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7835 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7836 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7837 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7838 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
7839 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7840 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7841 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7842 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7843 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7844 // CHECK4:       omp.precond.then:
7845 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7846 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7847 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7848 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7849 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7850 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7851 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7852 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7853 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7854 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
7855 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7856 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
7857 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
7858 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
7859 // CHECK4:       omp.dispatch.cond:
7860 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7861 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7862 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
7863 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7864 // CHECK4:       cond.true:
7865 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7866 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7867 // CHECK4:       cond.false:
7868 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7869 // CHECK4-NEXT:    br label [[COND_END]]
7870 // CHECK4:       cond.end:
7871 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
7872 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7873 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7874 // CHECK4-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
7875 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7876 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7877 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
7878 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7879 // CHECK4:       omp.dispatch.body:
7880 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7881 // CHECK4:       omp.inner.for.cond:
7882 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7883 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
7884 // CHECK4-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
7885 // CHECK4-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7886 // CHECK4:       omp.inner.for.body:
7887 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7888 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
7889 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7890 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
7891 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !41
7892 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
7893 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7894 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !41
7895 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !41
7896 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
7897 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7898 // CHECK4-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !41
7899 // CHECK4-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
7900 // CHECK4-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !41
7901 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
7902 // CHECK4-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
7903 // CHECK4-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !41
7904 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
7905 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 4, !llvm.access.group !41
7906 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
7907 // CHECK4-NEXT:    store i32* [[I4]], i32** [[TMP32]], align 4, !llvm.access.group !41
7908 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
7909 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 4, !llvm.access.group !41
7910 // CHECK4-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
7911 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 4, !llvm.access.group !41
7912 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !41
7913 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7914 // CHECK4:       omp.body.continue:
7915 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7916 // CHECK4:       omp.inner.for.inc:
7917 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7918 // CHECK4-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP35]], 1
7919 // CHECK4-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7920 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
7921 // CHECK4:       omp.inner.for.end:
7922 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
7923 // CHECK4:       omp.dispatch.inc:
7924 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7925 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7926 // CHECK4-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
7927 // CHECK4-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
7928 // CHECK4-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7929 // CHECK4-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7930 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
7931 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
7932 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
7933 // CHECK4:       omp.dispatch.end:
7934 // CHECK4-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7935 // CHECK4-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
7936 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
7937 // CHECK4-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7938 // CHECK4-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
7939 // CHECK4-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7940 // CHECK4:       .omp.final.then:
7941 // CHECK4-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7942 // CHECK4-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP44]], 0
7943 // CHECK4-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
7944 // CHECK4-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
7945 // CHECK4-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
7946 // CHECK4-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
7947 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7948 // CHECK4:       .omp.final.done:
7949 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7950 // CHECK4:       omp.precond.end:
7951 // CHECK4-NEXT:    ret void
7952 //
7953 //
7954 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
7955 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7956 // CHECK4-NEXT:  entry:
7957 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7958 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7959 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7960 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7961 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7962 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7963 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7964 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7965 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7966 // CHECK4-NEXT:    ret void
7967 //
7968 //
7969 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..18
7970 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7971 // CHECK4-NEXT:  entry:
7972 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7973 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7974 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7975 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7976 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7977 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7978 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7979 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7980 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7981 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7982 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7983 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7984 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7985 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7986 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7987 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7988 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7989 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7990 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7991 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7992 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7993 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7994 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7995 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7996 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7997 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7998 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7999 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8000 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8001 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8002 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8003 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8004 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8005 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8006 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8007 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8008 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8009 // CHECK4:       omp.precond.then:
8010 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8011 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8012 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
8013 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8014 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8015 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8016 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8017 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8018 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8019 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8020 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8021 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8022 // CHECK4:       cond.true:
8023 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8024 // CHECK4-NEXT:    br label [[COND_END:%.*]]
8025 // CHECK4:       cond.false:
8026 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8027 // CHECK4-NEXT:    br label [[COND_END]]
8028 // CHECK4:       cond.end:
8029 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8030 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8031 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8032 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
8033 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8034 // CHECK4:       omp.inner.for.cond:
8035 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8036 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
8037 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
8038 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8039 // CHECK4:       omp.inner.for.body:
8040 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
8041 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
8042 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !44
8043 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8044 // CHECK4:       omp.inner.for.inc:
8045 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8046 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
8047 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
8048 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8049 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
8050 // CHECK4:       omp.inner.for.end:
8051 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8052 // CHECK4:       omp.loop.exit:
8053 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8054 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
8055 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
8056 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8057 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8058 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8059 // CHECK4:       .omp.final.then:
8060 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8061 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
8062 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8063 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8064 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8065 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
8066 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8067 // CHECK4:       .omp.final.done:
8068 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8069 // CHECK4:       omp.precond.end:
8070 // CHECK4-NEXT:    ret void
8071 //
8072 //
8073 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..19
8074 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
8075 // CHECK4-NEXT:  entry:
8076 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
8077 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
8078 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
8079 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
8080 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
8081 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
8082 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
8083 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
8084 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8085 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8086 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8087 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8088 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
8089 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8090 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8091 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8092 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8093 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
8094 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 4
8095 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
8096 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
8097 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8098 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8099 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
8100 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
8101 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
8102 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
8103 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
8104 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
8105 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
8106 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
8107 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8108 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8109 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8110 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8111 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8112 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8113 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8114 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8115 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8116 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8117 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8118 // CHECK4:       omp.precond.then:
8119 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8120 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8121 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8122 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8123 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8124 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
8125 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
8126 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8127 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8128 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8129 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8130 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8131 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
8132 // CHECK4-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
8133 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
8134 // CHECK4:       omp.dispatch.cond:
8135 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8136 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
8137 // CHECK4-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
8138 // CHECK4-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
8139 // CHECK4-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
8140 // CHECK4:       omp.dispatch.body:
8141 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8142 // CHECK4-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
8143 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8144 // CHECK4:       omp.inner.for.cond:
8145 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8146 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
8147 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
8148 // CHECK4-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8149 // CHECK4:       omp.inner.for.body:
8150 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8151 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
8152 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8153 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !47
8154 // CHECK4-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !47
8155 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
8156 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
8157 // CHECK4-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !47
8158 // CHECK4-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !47
8159 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
8160 // CHECK4-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
8161 // CHECK4-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !47
8162 // CHECK4-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
8163 // CHECK4-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !47
8164 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
8165 // CHECK4-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
8166 // CHECK4-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !47
8167 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
8168 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 4, !llvm.access.group !47
8169 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
8170 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP30]], align 4, !llvm.access.group !47
8171 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
8172 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 4, !llvm.access.group !47
8173 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
8174 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 4, !llvm.access.group !47
8175 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !47
8176 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8177 // CHECK4:       omp.body.continue:
8178 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8179 // CHECK4:       omp.inner.for.inc:
8180 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8181 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], 1
8182 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8183 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
8184 // CHECK4:       omp.inner.for.end:
8185 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
8186 // CHECK4:       omp.dispatch.inc:
8187 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
8188 // CHECK4:       omp.dispatch.end:
8189 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8190 // CHECK4-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
8191 // CHECK4-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8192 // CHECK4:       .omp.final.then:
8193 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8194 // CHECK4-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP36]], 0
8195 // CHECK4-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
8196 // CHECK4-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
8197 // CHECK4-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8198 // CHECK4-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
8199 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8200 // CHECK4:       .omp.final.done:
8201 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8202 // CHECK4:       omp.precond.end:
8203 // CHECK4-NEXT:    ret void
8204 //
8205 //
8206 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
8207 // CHECK4-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
8208 // CHECK4-NEXT:  entry:
8209 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
8210 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
8211 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
8212 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
8213 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
8214 // CHECK4-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
8215 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
8216 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
8217 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
8218 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
8219 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
8220 // CHECK4-NEXT:    ret void
8221 //
8222 //
8223 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..22
8224 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
8225 // CHECK4-NEXT:  entry:
8226 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
8227 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
8228 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
8229 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
8230 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
8231 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
8232 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
8233 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8234 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8235 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8236 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8237 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8238 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
8239 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8240 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8241 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8242 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8243 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
8244 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
8245 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
8246 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
8247 // CHECK4-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
8248 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
8249 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
8250 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
8251 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
8252 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
8253 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
8254 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
8255 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
8256 // CHECK4-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
8257 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
8258 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
8259 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
8260 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8261 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8262 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
8263 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8264 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8265 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8266 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8267 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8268 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
8269 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8270 // CHECK4:       omp.precond.then:
8271 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8272 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8273 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
8274 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8275 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8276 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8277 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8278 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8279 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8280 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8281 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8282 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8283 // CHECK4:       cond.true:
8284 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8285 // CHECK4-NEXT:    br label [[COND_END:%.*]]
8286 // CHECK4:       cond.false:
8287 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8288 // CHECK4-NEXT:    br label [[COND_END]]
8289 // CHECK4:       cond.end:
8290 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8291 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8292 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8293 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8294 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8295 // CHECK4:       omp.inner.for.cond:
8296 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
8297 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
8298 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8299 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8300 // CHECK4:       omp.inner.for.body:
8301 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
8302 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
8303 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !50
8304 // CHECK4-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
8305 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
8306 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !50
8307 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8308 // CHECK4:       omp.inner.for.inc:
8309 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
8310 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
8311 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
8312 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
8313 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
8314 // CHECK4:       omp.inner.for.end:
8315 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8316 // CHECK4:       omp.loop.exit:
8317 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8318 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
8319 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
8320 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8321 // CHECK4-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
8322 // CHECK4-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8323 // CHECK4:       .omp.final.then:
8324 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8325 // CHECK4-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
8326 // CHECK4-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
8327 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
8328 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
8329 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
8330 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8331 // CHECK4:       .omp.final.done:
8332 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8333 // CHECK4:       omp.precond.end:
8334 // CHECK4-NEXT:    ret void
8335 //
8336 //
8337 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..23
8338 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
8339 // CHECK4-NEXT:  entry:
8340 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
8341 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
8342 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
8343 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
8344 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
8345 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
8346 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
8347 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
8348 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
8349 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8350 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8351 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8352 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8353 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
8354 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8355 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8356 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8357 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8358 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
8359 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 4
8360 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
8361 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
8362 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8363 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8364 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
8365 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
8366 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
8367 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
8368 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
8369 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
8370 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
8371 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
8372 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
8373 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8374 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8375 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8376 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8377 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8378 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8379 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8380 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8381 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8382 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8383 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8384 // CHECK4:       omp.precond.then:
8385 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8386 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8387 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8388 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8389 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8390 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
8391 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
8392 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8393 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8394 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
8395 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8396 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8397 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8398 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
8399 // CHECK4-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
8400 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
8401 // CHECK4:       omp.dispatch.cond:
8402 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8403 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
8404 // CHECK4-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
8405 // CHECK4-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
8406 // CHECK4-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
8407 // CHECK4:       omp.dispatch.body:
8408 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8409 // CHECK4-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
8410 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8411 // CHECK4:       omp.inner.for.cond:
8412 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8413 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
8414 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
8415 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8416 // CHECK4:       omp.inner.for.body:
8417 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8418 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
8419 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8420 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
8421 // CHECK4-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !53
8422 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
8423 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
8424 // CHECK4-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !53
8425 // CHECK4-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !53
8426 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
8427 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
8428 // CHECK4-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !53
8429 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
8430 // CHECK4-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !53
8431 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
8432 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
8433 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !53
8434 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
8435 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 4, !llvm.access.group !53
8436 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
8437 // CHECK4-NEXT:    store i32* [[I4]], i32** [[TMP31]], align 4, !llvm.access.group !53
8438 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
8439 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 4, !llvm.access.group !53
8440 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
8441 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 4, !llvm.access.group !53
8442 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !53
8443 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8444 // CHECK4:       omp.body.continue:
8445 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8446 // CHECK4:       omp.inner.for.inc:
8447 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8448 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP34]], 1
8449 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8450 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
8451 // CHECK4:       omp.inner.for.end:
8452 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
8453 // CHECK4:       omp.dispatch.inc:
8454 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
8455 // CHECK4:       omp.dispatch.end:
8456 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8457 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
8458 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8459 // CHECK4:       .omp.final.then:
8460 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8461 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
8462 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
8463 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
8464 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
8465 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
8466 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8467 // CHECK4:       .omp.final.done:
8468 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8469 // CHECK4:       omp.precond.end:
8470 // CHECK4-NEXT:    ret void
8471 //
8472 //
8473 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
8474 // CHECK4-SAME: () #[[ATTR4:[0-9]+]] {
8475 // CHECK4-NEXT:  entry:
8476 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
8477 // CHECK4-NEXT:    ret void
8478 //
8479 //
8480 // CHECK5-LABEL: define {{[^@]+}}@main
8481 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
8482 // CHECK5-NEXT:  entry:
8483 // CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8484 // CHECK5-NEXT:    [[A:%.*]] = alloca double*, align 8
8485 // CHECK5-NEXT:    [[B:%.*]] = alloca double*, align 8
8486 // CHECK5-NEXT:    [[C:%.*]] = alloca double*, align 8
8487 // CHECK5-NEXT:    [[N:%.*]] = alloca i32, align 4
8488 // CHECK5-NEXT:    [[CH:%.*]] = alloca i32, align 4
8489 // CHECK5-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
8490 // CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8491 // CHECK5-NEXT:    store i32 10000, i32* [[N]], align 4
8492 // CHECK5-NEXT:    store i32 100, i32* [[CH]], align 4
8493 // CHECK5-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8494 // CHECK5-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
8495 // CHECK5-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8496 // CHECK5-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
8497 // CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8498 // CHECK5-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
8499 // CHECK5-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8500 // CHECK5-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
8501 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8502 // CHECK5-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
8503 // CHECK5-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
8504 // CHECK5-NEXT:    ret i32 0
8505 //
8506 //
8507 // CHECK6-LABEL: define {{[^@]+}}@main
8508 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
8509 // CHECK6-NEXT:  entry:
8510 // CHECK6-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8511 // CHECK6-NEXT:    [[A:%.*]] = alloca double*, align 8
8512 // CHECK6-NEXT:    [[B:%.*]] = alloca double*, align 8
8513 // CHECK6-NEXT:    [[C:%.*]] = alloca double*, align 8
8514 // CHECK6-NEXT:    [[N:%.*]] = alloca i32, align 4
8515 // CHECK6-NEXT:    [[CH:%.*]] = alloca i32, align 4
8516 // CHECK6-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
8517 // CHECK6-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8518 // CHECK6-NEXT:    store i32 10000, i32* [[N]], align 4
8519 // CHECK6-NEXT:    store i32 100, i32* [[CH]], align 4
8520 // CHECK6-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8521 // CHECK6-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
8522 // CHECK6-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8523 // CHECK6-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
8524 // CHECK6-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8525 // CHECK6-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
8526 // CHECK6-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8527 // CHECK6-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
8528 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8529 // CHECK6-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
8530 // CHECK6-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
8531 // CHECK6-NEXT:    ret i32 0
8532 //
8533 //
8534 // CHECK7-LABEL: define {{[^@]+}}@main
8535 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
8536 // CHECK7-NEXT:  entry:
8537 // CHECK7-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8538 // CHECK7-NEXT:    [[A:%.*]] = alloca double*, align 4
8539 // CHECK7-NEXT:    [[B:%.*]] = alloca double*, align 4
8540 // CHECK7-NEXT:    [[C:%.*]] = alloca double*, align 4
8541 // CHECK7-NEXT:    [[N:%.*]] = alloca i32, align 4
8542 // CHECK7-NEXT:    [[CH:%.*]] = alloca i32, align 4
8543 // CHECK7-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
8544 // CHECK7-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8545 // CHECK7-NEXT:    store i32 10000, i32* [[N]], align 4
8546 // CHECK7-NEXT:    store i32 100, i32* [[CH]], align 4
8547 // CHECK7-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8548 // CHECK7-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
8549 // CHECK7-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8550 // CHECK7-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
8551 // CHECK7-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8552 // CHECK7-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
8553 // CHECK7-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8554 // CHECK7-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
8555 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8556 // CHECK7-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
8557 // CHECK7-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
8558 // CHECK7-NEXT:    ret i32 0
8559 //
8560 //
8561 // CHECK8-LABEL: define {{[^@]+}}@main
8562 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
8563 // CHECK8-NEXT:  entry:
8564 // CHECK8-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8565 // CHECK8-NEXT:    [[A:%.*]] = alloca double*, align 4
8566 // CHECK8-NEXT:    [[B:%.*]] = alloca double*, align 4
8567 // CHECK8-NEXT:    [[C:%.*]] = alloca double*, align 4
8568 // CHECK8-NEXT:    [[N:%.*]] = alloca i32, align 4
8569 // CHECK8-NEXT:    [[CH:%.*]] = alloca i32, align 4
8570 // CHECK8-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
8571 // CHECK8-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8572 // CHECK8-NEXT:    store i32 10000, i32* [[N]], align 4
8573 // CHECK8-NEXT:    store i32 100, i32* [[CH]], align 4
8574 // CHECK8-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8575 // CHECK8-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
8576 // CHECK8-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8577 // CHECK8-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
8578 // CHECK8-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8579 // CHECK8-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
8580 // CHECK8-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8581 // CHECK8-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
8582 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8583 // CHECK8-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
8584 // CHECK8-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
8585 // CHECK8-NEXT:    ret i32 0
8586 //
8587 //
8588 // CHECK9-LABEL: define {{[^@]+}}@main
8589 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
8590 // CHECK9-NEXT:  entry:
8591 // CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8592 // CHECK9-NEXT:    [[A:%.*]] = alloca double*, align 8
8593 // CHECK9-NEXT:    [[B:%.*]] = alloca double*, align 8
8594 // CHECK9-NEXT:    [[C:%.*]] = alloca double*, align 8
8595 // CHECK9-NEXT:    [[N:%.*]] = alloca i32, align 4
8596 // CHECK9-NEXT:    [[CH:%.*]] = alloca i32, align 4
8597 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8598 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
8599 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
8600 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
8601 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8602 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8603 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8604 // CHECK9-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
8605 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
8606 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
8607 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
8608 // CHECK9-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
8609 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
8610 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
8611 // CHECK9-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
8612 // CHECK9-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
8613 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
8614 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
8615 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
8616 // CHECK9-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
8617 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
8618 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
8619 // CHECK9-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
8620 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
8621 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
8622 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
8623 // CHECK9-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
8624 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
8625 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
8626 // CHECK9-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
8627 // CHECK9-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
8628 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
8629 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
8630 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
8631 // CHECK9-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
8632 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
8633 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
8634 // CHECK9-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
8635 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
8636 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
8637 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
8638 // CHECK9-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
8639 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
8640 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
8641 // CHECK9-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
8642 // CHECK9-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
8643 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
8644 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
8645 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
8646 // CHECK9-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
8647 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
8648 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
8649 // CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8650 // CHECK9-NEXT:    store i32 10000, i32* [[N]], align 4
8651 // CHECK9-NEXT:    store i32 100, i32* [[CH]], align 4
8652 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
8653 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8654 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
8655 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
8656 // CHECK9-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 8
8657 // CHECK9-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 8
8658 // CHECK9-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 8
8659 // CHECK9-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8660 // CHECK9-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
8661 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
8662 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8663 // CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
8664 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
8665 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
8666 // CHECK9-NEXT:    store i8* null, i8** [[TMP9]], align 8
8667 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
8668 // CHECK9-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
8669 // CHECK9-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 8
8670 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
8671 // CHECK9-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
8672 // CHECK9-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 8
8673 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
8674 // CHECK9-NEXT:    store i8* null, i8** [[TMP14]], align 8
8675 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
8676 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
8677 // CHECK9-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 8
8678 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
8679 // CHECK9-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
8680 // CHECK9-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 8
8681 // CHECK9-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
8682 // CHECK9-NEXT:    store i8* null, i8** [[TMP19]], align 8
8683 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
8684 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
8685 // CHECK9-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 8
8686 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
8687 // CHECK9-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
8688 // CHECK9-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 8
8689 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
8690 // CHECK9-NEXT:    store i8* null, i8** [[TMP24]], align 8
8691 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8692 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8693 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
8694 // CHECK9-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
8695 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8696 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
8697 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8698 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8699 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8700 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8701 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
8702 // CHECK9-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
8703 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
8704 // CHECK9-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8705 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8706 // CHECK9-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
8707 // CHECK9:       omp_offload.failed:
8708 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i64 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
8709 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
8710 // CHECK9:       omp_offload.cont:
8711 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
8712 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
8713 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
8714 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
8715 // CHECK9-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 8
8716 // CHECK9-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 8
8717 // CHECK9-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 8
8718 // CHECK9-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
8719 // CHECK9-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
8720 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
8721 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
8722 // CHECK9-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
8723 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
8724 // CHECK9-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
8725 // CHECK9-NEXT:    store i8* null, i8** [[TMP42]], align 8
8726 // CHECK9-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
8727 // CHECK9-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
8728 // CHECK9-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 8
8729 // CHECK9-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
8730 // CHECK9-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
8731 // CHECK9-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 8
8732 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
8733 // CHECK9-NEXT:    store i8* null, i8** [[TMP47]], align 8
8734 // CHECK9-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
8735 // CHECK9-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
8736 // CHECK9-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 8
8737 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
8738 // CHECK9-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
8739 // CHECK9-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 8
8740 // CHECK9-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
8741 // CHECK9-NEXT:    store i8* null, i8** [[TMP52]], align 8
8742 // CHECK9-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
8743 // CHECK9-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
8744 // CHECK9-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 8
8745 // CHECK9-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
8746 // CHECK9-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
8747 // CHECK9-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 8
8748 // CHECK9-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
8749 // CHECK9-NEXT:    store i8* null, i8** [[TMP57]], align 8
8750 // CHECK9-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
8751 // CHECK9-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
8752 // CHECK9-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
8753 // CHECK9-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
8754 // CHECK9-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
8755 // CHECK9-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
8756 // CHECK9-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
8757 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
8758 // CHECK9-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
8759 // CHECK9-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
8760 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
8761 // CHECK9-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
8762 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
8763 // CHECK9-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8764 // CHECK9-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
8765 // CHECK9-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
8766 // CHECK9:       omp_offload.failed15:
8767 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i64 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
8768 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
8769 // CHECK9:       omp_offload.cont16:
8770 // CHECK9-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
8771 // CHECK9-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
8772 // CHECK9-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
8773 // CHECK9-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
8774 // CHECK9-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
8775 // CHECK9-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
8776 // CHECK9-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
8777 // CHECK9-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
8778 // CHECK9-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 8
8779 // CHECK9-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 8
8780 // CHECK9-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 8
8781 // CHECK9-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
8782 // CHECK9-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
8783 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
8784 // CHECK9-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
8785 // CHECK9-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
8786 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
8787 // CHECK9-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
8788 // CHECK9-NEXT:    store i8* null, i8** [[TMP77]], align 8
8789 // CHECK9-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
8790 // CHECK9-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
8791 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
8792 // CHECK9-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
8793 // CHECK9-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
8794 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
8795 // CHECK9-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
8796 // CHECK9-NEXT:    store i8* null, i8** [[TMP82]], align 8
8797 // CHECK9-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
8798 // CHECK9-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
8799 // CHECK9-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 8
8800 // CHECK9-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
8801 // CHECK9-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
8802 // CHECK9-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 8
8803 // CHECK9-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
8804 // CHECK9-NEXT:    store i8* null, i8** [[TMP87]], align 8
8805 // CHECK9-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
8806 // CHECK9-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
8807 // CHECK9-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 8
8808 // CHECK9-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
8809 // CHECK9-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
8810 // CHECK9-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 8
8811 // CHECK9-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
8812 // CHECK9-NEXT:    store i8* null, i8** [[TMP92]], align 8
8813 // CHECK9-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
8814 // CHECK9-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
8815 // CHECK9-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 8
8816 // CHECK9-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
8817 // CHECK9-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
8818 // CHECK9-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 8
8819 // CHECK9-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
8820 // CHECK9-NEXT:    store i8* null, i8** [[TMP97]], align 8
8821 // CHECK9-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
8822 // CHECK9-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
8823 // CHECK9-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
8824 // CHECK9-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
8825 // CHECK9-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
8826 // CHECK9-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
8827 // CHECK9-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
8828 // CHECK9-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
8829 // CHECK9-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
8830 // CHECK9-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
8831 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
8832 // CHECK9-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
8833 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
8834 // CHECK9-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8835 // CHECK9-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
8836 // CHECK9-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
8837 // CHECK9:       omp_offload.failed30:
8838 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i64 [[TMP67]], i64 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
8839 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
8840 // CHECK9:       omp_offload.cont31:
8841 // CHECK9-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
8842 // CHECK9-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
8843 // CHECK9-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
8844 // CHECK9-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
8845 // CHECK9-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 8
8846 // CHECK9-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 8
8847 // CHECK9-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 8
8848 // CHECK9-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8849 // CHECK9-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
8850 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
8851 // CHECK9-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8852 // CHECK9-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
8853 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
8854 // CHECK9-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
8855 // CHECK9-NEXT:    store i8* null, i8** [[TMP115]], align 8
8856 // CHECK9-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
8857 // CHECK9-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
8858 // CHECK9-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 8
8859 // CHECK9-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
8860 // CHECK9-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
8861 // CHECK9-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 8
8862 // CHECK9-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
8863 // CHECK9-NEXT:    store i8* null, i8** [[TMP120]], align 8
8864 // CHECK9-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
8865 // CHECK9-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
8866 // CHECK9-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 8
8867 // CHECK9-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
8868 // CHECK9-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
8869 // CHECK9-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 8
8870 // CHECK9-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
8871 // CHECK9-NEXT:    store i8* null, i8** [[TMP125]], align 8
8872 // CHECK9-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
8873 // CHECK9-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
8874 // CHECK9-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 8
8875 // CHECK9-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
8876 // CHECK9-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
8877 // CHECK9-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 8
8878 // CHECK9-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
8879 // CHECK9-NEXT:    store i8* null, i8** [[TMP130]], align 8
8880 // CHECK9-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8881 // CHECK9-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8882 // CHECK9-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
8883 // CHECK9-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
8884 // CHECK9-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
8885 // CHECK9-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
8886 // CHECK9-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
8887 // CHECK9-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
8888 // CHECK9-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
8889 // CHECK9-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
8890 // CHECK9-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
8891 // CHECK9-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
8892 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
8893 // CHECK9-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8894 // CHECK9-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
8895 // CHECK9-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
8896 // CHECK9:       omp_offload.failed44:
8897 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i64 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
8898 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
8899 // CHECK9:       omp_offload.cont45:
8900 // CHECK9-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
8901 // CHECK9-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
8902 // CHECK9-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
8903 // CHECK9-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
8904 // CHECK9-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
8905 // CHECK9-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
8906 // CHECK9-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
8907 // CHECK9-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
8908 // CHECK9-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 8
8909 // CHECK9-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 8
8910 // CHECK9-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 8
8911 // CHECK9-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8912 // CHECK9-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
8913 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
8914 // CHECK9-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8915 // CHECK9-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
8916 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
8917 // CHECK9-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
8918 // CHECK9-NEXT:    store i8* null, i8** [[TMP150]], align 8
8919 // CHECK9-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
8920 // CHECK9-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
8921 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
8922 // CHECK9-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
8923 // CHECK9-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
8924 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
8925 // CHECK9-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
8926 // CHECK9-NEXT:    store i8* null, i8** [[TMP155]], align 8
8927 // CHECK9-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
8928 // CHECK9-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
8929 // CHECK9-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 8
8930 // CHECK9-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
8931 // CHECK9-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
8932 // CHECK9-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 8
8933 // CHECK9-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
8934 // CHECK9-NEXT:    store i8* null, i8** [[TMP160]], align 8
8935 // CHECK9-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
8936 // CHECK9-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
8937 // CHECK9-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 8
8938 // CHECK9-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
8939 // CHECK9-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
8940 // CHECK9-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 8
8941 // CHECK9-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
8942 // CHECK9-NEXT:    store i8* null, i8** [[TMP165]], align 8
8943 // CHECK9-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
8944 // CHECK9-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
8945 // CHECK9-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 8
8946 // CHECK9-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
8947 // CHECK9-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
8948 // CHECK9-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 8
8949 // CHECK9-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
8950 // CHECK9-NEXT:    store i8* null, i8** [[TMP170]], align 8
8951 // CHECK9-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8952 // CHECK9-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8953 // CHECK9-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
8954 // CHECK9-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
8955 // CHECK9-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
8956 // CHECK9-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
8957 // CHECK9-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
8958 // CHECK9-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
8959 // CHECK9-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
8960 // CHECK9-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
8961 // CHECK9-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
8962 // CHECK9-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
8963 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
8964 // CHECK9-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8965 // CHECK9-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
8966 // CHECK9-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
8967 // CHECK9:       omp_offload.failed60:
8968 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i64 [[TMP140]], i64 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
8969 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
8970 // CHECK9:       omp_offload.cont61:
8971 // CHECK9-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
8972 // CHECK9-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
8973 // CHECK9-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
8974 // CHECK9-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
8975 // CHECK9-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 8
8976 // CHECK9-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 8
8977 // CHECK9-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 8
8978 // CHECK9-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
8979 // CHECK9-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
8980 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
8981 // CHECK9-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
8982 // CHECK9-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
8983 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
8984 // CHECK9-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
8985 // CHECK9-NEXT:    store i8* null, i8** [[TMP188]], align 8
8986 // CHECK9-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
8987 // CHECK9-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
8988 // CHECK9-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 8
8989 // CHECK9-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
8990 // CHECK9-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
8991 // CHECK9-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 8
8992 // CHECK9-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
8993 // CHECK9-NEXT:    store i8* null, i8** [[TMP193]], align 8
8994 // CHECK9-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
8995 // CHECK9-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
8996 // CHECK9-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 8
8997 // CHECK9-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
8998 // CHECK9-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
8999 // CHECK9-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 8
9000 // CHECK9-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
9001 // CHECK9-NEXT:    store i8* null, i8** [[TMP198]], align 8
9002 // CHECK9-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
9003 // CHECK9-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
9004 // CHECK9-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 8
9005 // CHECK9-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
9006 // CHECK9-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
9007 // CHECK9-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 8
9008 // CHECK9-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
9009 // CHECK9-NEXT:    store i8* null, i8** [[TMP203]], align 8
9010 // CHECK9-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
9011 // CHECK9-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
9012 // CHECK9-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
9013 // CHECK9-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
9014 // CHECK9-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
9015 // CHECK9-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
9016 // CHECK9-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
9017 // CHECK9-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
9018 // CHECK9-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
9019 // CHECK9-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
9020 // CHECK9-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
9021 // CHECK9-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
9022 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
9023 // CHECK9-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9024 // CHECK9-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
9025 // CHECK9-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
9026 // CHECK9:       omp_offload.failed74:
9027 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i64 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
9028 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
9029 // CHECK9:       omp_offload.cont75:
9030 // CHECK9-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
9031 // CHECK9-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
9032 // CHECK9-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
9033 // CHECK9-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
9034 // CHECK9-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
9035 // CHECK9-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
9036 // CHECK9-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
9037 // CHECK9-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
9038 // CHECK9-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 8
9039 // CHECK9-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 8
9040 // CHECK9-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 8
9041 // CHECK9-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
9042 // CHECK9-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
9043 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
9044 // CHECK9-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
9045 // CHECK9-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
9046 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
9047 // CHECK9-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
9048 // CHECK9-NEXT:    store i8* null, i8** [[TMP223]], align 8
9049 // CHECK9-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
9050 // CHECK9-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
9051 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
9052 // CHECK9-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
9053 // CHECK9-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
9054 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
9055 // CHECK9-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
9056 // CHECK9-NEXT:    store i8* null, i8** [[TMP228]], align 8
9057 // CHECK9-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
9058 // CHECK9-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
9059 // CHECK9-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 8
9060 // CHECK9-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
9061 // CHECK9-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
9062 // CHECK9-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 8
9063 // CHECK9-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
9064 // CHECK9-NEXT:    store i8* null, i8** [[TMP233]], align 8
9065 // CHECK9-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
9066 // CHECK9-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
9067 // CHECK9-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 8
9068 // CHECK9-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
9069 // CHECK9-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
9070 // CHECK9-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 8
9071 // CHECK9-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
9072 // CHECK9-NEXT:    store i8* null, i8** [[TMP238]], align 8
9073 // CHECK9-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
9074 // CHECK9-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
9075 // CHECK9-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 8
9076 // CHECK9-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
9077 // CHECK9-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
9078 // CHECK9-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 8
9079 // CHECK9-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
9080 // CHECK9-NEXT:    store i8* null, i8** [[TMP243]], align 8
9081 // CHECK9-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
9082 // CHECK9-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
9083 // CHECK9-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
9084 // CHECK9-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
9085 // CHECK9-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
9086 // CHECK9-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
9087 // CHECK9-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
9088 // CHECK9-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
9089 // CHECK9-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
9090 // CHECK9-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
9091 // CHECK9-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
9092 // CHECK9-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
9093 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
9094 // CHECK9-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9095 // CHECK9-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
9096 // CHECK9-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
9097 // CHECK9:       omp_offload.failed90:
9098 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i64 [[TMP213]], i64 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
9099 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
9100 // CHECK9:       omp_offload.cont91:
9101 // CHECK9-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
9102 // CHECK9-NEXT:    ret i32 [[CALL]]
9103 //
9104 //
9105 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
9106 // CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
9107 // CHECK9-NEXT:  entry:
9108 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9109 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
9110 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
9111 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
9112 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9113 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
9114 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
9115 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
9116 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9117 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
9118 // CHECK9-NEXT:    ret void
9119 //
9120 //
9121 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
9122 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9123 // CHECK9-NEXT:  entry:
9124 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9125 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9126 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9127 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9128 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9129 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9130 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9131 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9132 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9133 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9134 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9135 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9136 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9137 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9138 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9139 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
9140 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9141 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9142 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9143 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9144 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9145 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9146 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9147 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
9148 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
9149 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
9150 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9151 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9152 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9153 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9154 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9155 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9156 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9157 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9158 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9159 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9160 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9161 // CHECK9:       omp.precond.then:
9162 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9163 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9164 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
9165 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9166 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9167 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9168 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9169 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9170 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9171 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9172 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
9173 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9174 // CHECK9:       cond.true:
9175 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9176 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9177 // CHECK9:       cond.false:
9178 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9179 // CHECK9-NEXT:    br label [[COND_END]]
9180 // CHECK9:       cond.end:
9181 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
9182 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9183 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9184 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
9185 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9186 // CHECK9:       omp.inner.for.cond:
9187 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
9188 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
9189 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
9190 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9191 // CHECK9:       omp.inner.for.body:
9192 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
9193 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
9194 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
9195 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9196 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !17
9197 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9198 // CHECK9:       omp.inner.for.inc:
9199 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
9200 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
9201 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9202 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
9203 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
9204 // CHECK9:       omp.inner.for.end:
9205 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9206 // CHECK9:       omp.loop.exit:
9207 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9208 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
9209 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
9210 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9211 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9212 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9213 // CHECK9:       .omp.final.then:
9214 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9215 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
9216 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
9217 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
9218 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
9219 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
9220 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9221 // CHECK9:       .omp.final.done:
9222 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9223 // CHECK9:       omp.precond.end:
9224 // CHECK9-NEXT:    ret void
9225 //
9226 //
9227 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
9228 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9229 // CHECK9-NEXT:  entry:
9230 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9231 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9232 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9233 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9234 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9235 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9236 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9237 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9238 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9239 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9240 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9241 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9242 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9243 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9244 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9245 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9246 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9247 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
9248 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9249 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9250 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9251 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9252 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9253 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9254 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9255 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9256 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9257 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
9258 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
9259 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
9260 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9261 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9262 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9263 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9264 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9265 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9266 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9267 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9268 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9269 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9270 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9271 // CHECK9:       omp.precond.then:
9272 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9273 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9274 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9275 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9276 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
9277 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9278 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
9279 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
9280 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
9281 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9282 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9283 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9284 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
9285 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9286 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9287 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9288 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
9289 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9290 // CHECK9:       cond.true:
9291 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9292 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9293 // CHECK9:       cond.false:
9294 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9295 // CHECK9-NEXT:    br label [[COND_END]]
9296 // CHECK9:       cond.end:
9297 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
9298 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9299 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9300 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
9301 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9302 // CHECK9:       omp.inner.for.cond:
9303 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
9304 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
9305 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
9306 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9307 // CHECK9:       omp.inner.for.body:
9308 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
9309 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
9310 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9311 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !21
9312 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !21
9313 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
9314 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
9315 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
9316 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !21
9317 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !21
9318 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
9319 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
9320 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
9321 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !21
9322 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
9323 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !21
9324 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
9325 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
9326 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
9327 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !21
9328 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9329 // CHECK9:       omp.body.continue:
9330 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9331 // CHECK9:       omp.inner.for.inc:
9332 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
9333 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
9334 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
9335 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
9336 // CHECK9:       omp.inner.for.end:
9337 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9338 // CHECK9:       omp.loop.exit:
9339 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9340 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
9341 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
9342 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9343 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
9344 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9345 // CHECK9:       .omp.final.then:
9346 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9347 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
9348 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
9349 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
9350 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
9351 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
9352 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9353 // CHECK9:       .omp.final.done:
9354 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9355 // CHECK9:       omp.precond.end:
9356 // CHECK9-NEXT:    ret void
9357 //
9358 //
9359 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
9360 // CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
9361 // CHECK9-NEXT:  entry:
9362 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9363 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
9364 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
9365 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
9366 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9367 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
9368 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
9369 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
9370 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9371 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
9372 // CHECK9-NEXT:    ret void
9373 //
9374 //
9375 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2
9376 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9377 // CHECK9-NEXT:  entry:
9378 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9379 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9380 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9381 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9382 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9383 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9384 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9385 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9386 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9387 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9388 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9389 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9390 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9391 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9392 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9393 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
9394 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9395 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9396 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9397 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9398 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9399 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9400 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9401 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
9402 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
9403 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
9404 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9405 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9406 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9407 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9408 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9409 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9410 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9411 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9412 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9413 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9414 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9415 // CHECK9:       omp.precond.then:
9416 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9417 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9418 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
9419 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9420 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9421 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9422 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9423 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9424 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9425 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9426 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
9427 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9428 // CHECK9:       cond.true:
9429 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9430 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9431 // CHECK9:       cond.false:
9432 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9433 // CHECK9-NEXT:    br label [[COND_END]]
9434 // CHECK9:       cond.end:
9435 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
9436 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9437 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9438 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
9439 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9440 // CHECK9:       omp.inner.for.cond:
9441 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
9442 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
9443 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
9444 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9445 // CHECK9:       omp.inner.for.body:
9446 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
9447 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
9448 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
9449 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9450 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !26
9451 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9452 // CHECK9:       omp.inner.for.inc:
9453 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
9454 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
9455 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9456 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
9457 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
9458 // CHECK9:       omp.inner.for.end:
9459 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9460 // CHECK9:       omp.loop.exit:
9461 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9462 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
9463 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
9464 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9465 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9466 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9467 // CHECK9:       .omp.final.then:
9468 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9469 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
9470 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
9471 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
9472 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
9473 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
9474 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9475 // CHECK9:       .omp.final.done:
9476 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9477 // CHECK9:       omp.precond.end:
9478 // CHECK9-NEXT:    ret void
9479 //
9480 //
9481 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3
9482 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9483 // CHECK9-NEXT:  entry:
9484 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9485 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9486 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9487 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9488 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9489 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9490 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9491 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9492 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9493 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9494 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9495 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9496 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9497 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9498 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9499 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9500 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9501 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
9502 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9503 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9504 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9505 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9506 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9507 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9508 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9509 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9510 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9511 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
9512 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
9513 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
9514 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9515 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9516 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9517 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9518 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9519 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9520 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9521 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9522 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9523 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9524 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9525 // CHECK9:       omp.precond.then:
9526 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9527 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9528 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9529 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9530 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
9531 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9532 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
9533 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
9534 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
9535 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9536 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9537 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9538 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
9539 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9540 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9541 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9542 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
9543 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9544 // CHECK9:       cond.true:
9545 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9546 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9547 // CHECK9:       cond.false:
9548 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9549 // CHECK9-NEXT:    br label [[COND_END]]
9550 // CHECK9:       cond.end:
9551 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
9552 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9553 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9554 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
9555 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9556 // CHECK9:       omp.inner.for.cond:
9557 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
9558 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
9559 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
9560 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9561 // CHECK9:       omp.inner.for.body:
9562 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
9563 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
9564 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9565 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !29
9566 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !29
9567 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
9568 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
9569 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
9570 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !29
9571 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !29
9572 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
9573 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
9574 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
9575 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !29
9576 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
9577 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !29
9578 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
9579 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
9580 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
9581 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !29
9582 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9583 // CHECK9:       omp.body.continue:
9584 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9585 // CHECK9:       omp.inner.for.inc:
9586 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
9587 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
9588 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
9589 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
9590 // CHECK9:       omp.inner.for.end:
9591 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9592 // CHECK9:       omp.loop.exit:
9593 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9594 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
9595 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
9596 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9597 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
9598 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9599 // CHECK9:       .omp.final.then:
9600 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9601 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
9602 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
9603 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
9604 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
9605 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
9606 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9607 // CHECK9:       .omp.final.done:
9608 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9609 // CHECK9:       omp.precond.end:
9610 // CHECK9-NEXT:    ret void
9611 //
9612 //
9613 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
9614 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
9615 // CHECK9-NEXT:  entry:
9616 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
9617 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9618 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
9619 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
9620 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
9621 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
9622 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9623 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
9624 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
9625 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
9626 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
9627 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9628 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
9629 // CHECK9-NEXT:    ret void
9630 //
9631 //
9632 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6
9633 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9634 // CHECK9-NEXT:  entry:
9635 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9636 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9637 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
9638 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9639 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9640 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9641 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9642 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9643 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9644 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9645 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9646 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9647 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9648 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9649 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9650 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9651 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
9652 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9653 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9654 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
9655 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9656 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9657 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9658 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9659 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
9660 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9661 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
9662 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
9663 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
9664 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
9665 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
9666 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9667 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
9668 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9669 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9670 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9671 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9672 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9673 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
9674 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9675 // CHECK9:       omp.precond.then:
9676 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9677 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9678 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
9679 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9680 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9681 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
9682 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9683 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
9684 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
9685 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9686 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9687 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
9688 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9689 // CHECK9:       cond.true:
9690 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9691 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9692 // CHECK9:       cond.false:
9693 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9694 // CHECK9-NEXT:    br label [[COND_END]]
9695 // CHECK9:       cond.end:
9696 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
9697 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9698 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9699 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
9700 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9701 // CHECK9:       omp.inner.for.cond:
9702 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
9703 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
9704 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
9705 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
9706 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9707 // CHECK9:       omp.inner.for.body:
9708 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
9709 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9710 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
9711 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
9712 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !32
9713 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9714 // CHECK9:       omp.inner.for.inc:
9715 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
9716 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
9717 // CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
9718 // CHECK9-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
9719 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
9720 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
9721 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
9722 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
9723 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
9724 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
9725 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
9726 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
9727 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
9728 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
9729 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
9730 // CHECK9-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
9731 // CHECK9:       cond.true10:
9732 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
9733 // CHECK9-NEXT:    br label [[COND_END12:%.*]]
9734 // CHECK9:       cond.false11:
9735 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
9736 // CHECK9-NEXT:    br label [[COND_END12]]
9737 // CHECK9:       cond.end12:
9738 // CHECK9-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
9739 // CHECK9-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
9740 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
9741 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
9742 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
9743 // CHECK9:       omp.inner.for.end:
9744 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9745 // CHECK9:       omp.loop.exit:
9746 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9747 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
9748 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
9749 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9750 // CHECK9-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
9751 // CHECK9-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9752 // CHECK9:       .omp.final.then:
9753 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9754 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
9755 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
9756 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
9757 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
9758 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
9759 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9760 // CHECK9:       .omp.final.done:
9761 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9762 // CHECK9:       omp.precond.end:
9763 // CHECK9-NEXT:    ret void
9764 //
9765 //
9766 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7
9767 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9768 // CHECK9-NEXT:  entry:
9769 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9770 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9771 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9772 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9773 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9774 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9775 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9776 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9777 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9778 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9779 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9780 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9781 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9782 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9783 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9784 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9785 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9786 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
9787 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9788 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9789 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9790 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9791 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9792 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9793 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9794 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9795 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9796 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
9797 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
9798 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
9799 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9800 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9801 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9802 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9803 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9804 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9805 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9806 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9807 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9808 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9809 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9810 // CHECK9:       omp.precond.then:
9811 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9812 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9813 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9814 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9815 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
9816 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9817 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
9818 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
9819 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
9820 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9821 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9822 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9823 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
9824 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9825 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9826 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9827 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
9828 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9829 // CHECK9:       cond.true:
9830 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9831 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9832 // CHECK9:       cond.false:
9833 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9834 // CHECK9-NEXT:    br label [[COND_END]]
9835 // CHECK9:       cond.end:
9836 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
9837 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9838 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9839 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
9840 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9841 // CHECK9:       omp.inner.for.cond:
9842 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
9843 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
9844 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
9845 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9846 // CHECK9:       omp.inner.for.body:
9847 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
9848 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
9849 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9850 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !35
9851 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !35
9852 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
9853 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
9854 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
9855 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !35
9856 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !35
9857 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
9858 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
9859 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
9860 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !35
9861 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
9862 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !35
9863 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
9864 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
9865 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
9866 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !35
9867 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9868 // CHECK9:       omp.body.continue:
9869 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9870 // CHECK9:       omp.inner.for.inc:
9871 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
9872 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
9873 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
9874 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
9875 // CHECK9:       omp.inner.for.end:
9876 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9877 // CHECK9:       omp.loop.exit:
9878 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9879 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
9880 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
9881 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9882 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
9883 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9884 // CHECK9:       .omp.final.then:
9885 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9886 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
9887 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
9888 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
9889 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
9890 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
9891 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9892 // CHECK9:       .omp.final.done:
9893 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9894 // CHECK9:       omp.precond.end:
9895 // CHECK9-NEXT:    ret void
9896 //
9897 //
9898 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
9899 // CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
9900 // CHECK9-NEXT:  entry:
9901 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9902 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
9903 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
9904 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
9905 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9906 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
9907 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
9908 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
9909 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9910 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
9911 // CHECK9-NEXT:    ret void
9912 //
9913 //
9914 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10
9915 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9916 // CHECK9-NEXT:  entry:
9917 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9918 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9919 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9920 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9921 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9922 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9923 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9924 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9925 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9926 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9927 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9928 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9929 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9930 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9931 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9932 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
9933 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9934 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9935 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9936 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9937 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9938 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9939 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9940 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
9941 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
9942 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
9943 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9944 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9945 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9946 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9947 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9948 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9949 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9950 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9951 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9952 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9953 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9954 // CHECK9:       omp.precond.then:
9955 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9956 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9957 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
9958 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9959 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9960 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9961 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9962 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9963 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9964 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9965 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
9966 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9967 // CHECK9:       cond.true:
9968 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9969 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9970 // CHECK9:       cond.false:
9971 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9972 // CHECK9-NEXT:    br label [[COND_END]]
9973 // CHECK9:       cond.end:
9974 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
9975 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9976 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9977 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
9978 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9979 // CHECK9:       omp.inner.for.cond:
9980 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
9981 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
9982 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
9983 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9984 // CHECK9:       omp.inner.for.body:
9985 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
9986 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
9987 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
9988 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9989 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !38
9990 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9991 // CHECK9:       omp.inner.for.inc:
9992 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
9993 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
9994 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9995 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
9996 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
9997 // CHECK9:       omp.inner.for.end:
9998 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9999 // CHECK9:       omp.loop.exit:
10000 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10001 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
10002 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
10003 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10004 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
10005 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10006 // CHECK9:       .omp.final.then:
10007 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10008 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
10009 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10010 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10011 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10012 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
10013 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10014 // CHECK9:       .omp.final.done:
10015 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10016 // CHECK9:       omp.precond.end:
10017 // CHECK9-NEXT:    ret void
10018 //
10019 //
10020 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11
10021 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10022 // CHECK9-NEXT:  entry:
10023 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10024 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10025 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10026 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10027 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10028 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10029 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10030 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10031 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10032 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10033 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10034 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10035 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10036 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10037 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10038 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10039 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10040 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10041 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10042 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10043 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10044 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10045 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10046 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10047 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10048 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10049 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10050 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10051 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10052 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10053 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10054 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10055 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10056 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10057 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10058 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10059 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10060 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10061 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10062 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10063 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10064 // CHECK9:       omp.precond.then:
10065 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10066 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10067 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10068 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10069 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
10070 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10071 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
10072 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
10073 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
10074 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10075 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10076 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10077 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10078 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10079 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10080 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10081 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10082 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10083 // CHECK9:       cond.true:
10084 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10085 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10086 // CHECK9:       cond.false:
10087 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10088 // CHECK9-NEXT:    br label [[COND_END]]
10089 // CHECK9:       cond.end:
10090 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10091 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10092 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10093 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10094 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10095 // CHECK9:       omp.inner.for.cond:
10096 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10097 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
10098 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10099 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10100 // CHECK9:       omp.inner.for.body:
10101 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10102 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10103 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10104 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
10105 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !41
10106 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
10107 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
10108 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
10109 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !41
10110 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !41
10111 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
10112 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
10113 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
10114 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !41
10115 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
10116 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !41
10117 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
10118 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
10119 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
10120 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !41
10121 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10122 // CHECK9:       omp.body.continue:
10123 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10124 // CHECK9:       omp.inner.for.inc:
10125 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10126 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
10127 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10128 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
10129 // CHECK9:       omp.inner.for.end:
10130 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10131 // CHECK9:       omp.loop.exit:
10132 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10133 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10134 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10135 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10136 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10137 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10138 // CHECK9:       .omp.final.then:
10139 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10140 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
10141 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
10142 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
10143 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
10144 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
10145 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10146 // CHECK9:       .omp.final.done:
10147 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10148 // CHECK9:       omp.precond.end:
10149 // CHECK9-NEXT:    ret void
10150 //
10151 //
10152 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
10153 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
10154 // CHECK9-NEXT:  entry:
10155 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
10156 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10157 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
10158 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
10159 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
10160 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
10161 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10162 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
10163 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
10164 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
10165 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
10166 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10167 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10168 // CHECK9-NEXT:    ret void
10169 //
10170 //
10171 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14
10172 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10173 // CHECK9-NEXT:  entry:
10174 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10175 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10176 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
10177 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10178 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10179 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10180 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10181 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10182 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10183 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10184 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10185 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10186 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10187 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10188 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10189 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10190 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10191 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10192 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10193 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10194 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10195 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
10196 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10197 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10198 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10199 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10200 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
10201 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10202 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
10203 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
10204 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
10205 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
10206 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
10207 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
10208 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10209 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10210 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
10211 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10212 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10213 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10214 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10215 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10216 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
10217 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10218 // CHECK9:       omp.precond.then:
10219 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10220 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10221 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
10222 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10223 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10224 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10225 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10226 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10227 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10228 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10229 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10230 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10231 // CHECK9:       cond.true:
10232 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10233 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10234 // CHECK9:       cond.false:
10235 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10236 // CHECK9-NEXT:    br label [[COND_END]]
10237 // CHECK9:       cond.end:
10238 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10239 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10240 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10241 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10242 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10243 // CHECK9:       omp.inner.for.cond:
10244 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10245 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
10246 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10247 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10248 // CHECK9:       omp.inner.for.body:
10249 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
10250 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
10251 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
10252 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
10253 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
10254 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10255 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !44
10256 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !44
10257 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !44
10258 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10259 // CHECK9:       omp.inner.for.inc:
10260 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10261 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
10262 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
10263 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10264 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
10265 // CHECK9:       omp.inner.for.end:
10266 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10267 // CHECK9:       omp.loop.exit:
10268 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10269 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
10270 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
10271 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10272 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
10273 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10274 // CHECK9:       .omp.final.then:
10275 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10276 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
10277 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
10278 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
10279 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
10280 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
10281 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10282 // CHECK9:       .omp.final.done:
10283 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10284 // CHECK9:       omp.precond.end:
10285 // CHECK9-NEXT:    ret void
10286 //
10287 //
10288 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15
10289 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
10290 // CHECK9-NEXT:  entry:
10291 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10292 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10293 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10294 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10295 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10296 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10297 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10298 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10299 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10300 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10301 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10302 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10303 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10304 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10305 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10306 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10307 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10308 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10309 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
10310 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10311 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10312 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10313 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10314 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10315 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10316 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10317 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10318 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10319 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10320 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10321 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10322 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10323 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10324 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10325 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10326 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10327 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10328 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10329 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10330 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10331 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10332 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10333 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10334 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10335 // CHECK9:       omp.precond.then:
10336 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10337 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10338 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10339 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10340 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
10341 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10342 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
10343 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
10344 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
10345 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10346 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10347 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
10348 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10349 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
10350 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
10351 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10352 // CHECK9:       omp.dispatch.cond:
10353 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10354 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10355 // CHECK9-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
10356 // CHECK9-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
10357 // CHECK9-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10358 // CHECK9:       cond.true:
10359 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10360 // CHECK9-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
10361 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10362 // CHECK9:       cond.false:
10363 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10364 // CHECK9-NEXT:    br label [[COND_END]]
10365 // CHECK9:       cond.end:
10366 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
10367 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10368 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10369 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
10370 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10371 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10372 // CHECK9-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
10373 // CHECK9-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10374 // CHECK9:       omp.dispatch.body:
10375 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10376 // CHECK9:       omp.inner.for.cond:
10377 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10378 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
10379 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
10380 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10381 // CHECK9:       omp.inner.for.body:
10382 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10383 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
10384 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10385 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !47
10386 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !47
10387 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
10388 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
10389 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
10390 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !47
10391 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !47
10392 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
10393 // CHECK9-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
10394 // CHECK9-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
10395 // CHECK9-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !47
10396 // CHECK9-NEXT:    [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
10397 // CHECK9-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !47
10398 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
10399 // CHECK9-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
10400 // CHECK9-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
10401 // CHECK9-NEXT:    store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !47
10402 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10403 // CHECK9:       omp.body.continue:
10404 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10405 // CHECK9:       omp.inner.for.inc:
10406 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10407 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
10408 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10409 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
10410 // CHECK9:       omp.inner.for.end:
10411 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10412 // CHECK9:       omp.dispatch.inc:
10413 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10414 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10415 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
10416 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
10417 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10418 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10419 // CHECK9-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
10420 // CHECK9-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
10421 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
10422 // CHECK9:       omp.dispatch.end:
10423 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10424 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
10425 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
10426 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10427 // CHECK9-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
10428 // CHECK9-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10429 // CHECK9:       .omp.final.then:
10430 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10431 // CHECK9-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
10432 // CHECK9-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
10433 // CHECK9-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
10434 // CHECK9-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
10435 // CHECK9-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
10436 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10437 // CHECK9:       .omp.final.done:
10438 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10439 // CHECK9:       omp.precond.end:
10440 // CHECK9-NEXT:    ret void
10441 //
10442 //
10443 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
10444 // CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
10445 // CHECK9-NEXT:  entry:
10446 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10447 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
10448 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
10449 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
10450 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10451 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
10452 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
10453 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
10454 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10455 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10456 // CHECK9-NEXT:    ret void
10457 //
10458 //
10459 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18
10460 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10461 // CHECK9-NEXT:  entry:
10462 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10463 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10464 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10465 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10466 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10467 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10468 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10469 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10470 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10471 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10472 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10473 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10474 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10475 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10476 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10477 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
10478 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10479 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10480 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10481 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10482 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10483 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10484 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10485 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10486 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10487 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10488 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10489 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10490 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10491 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10492 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10493 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10494 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10495 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10496 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10497 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10498 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10499 // CHECK9:       omp.precond.then:
10500 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10501 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10502 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
10503 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10504 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10505 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10506 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
10507 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10508 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10509 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10510 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10511 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10512 // CHECK9:       cond.true:
10513 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10514 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10515 // CHECK9:       cond.false:
10516 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10517 // CHECK9-NEXT:    br label [[COND_END]]
10518 // CHECK9:       cond.end:
10519 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10520 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10521 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10522 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10523 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10524 // CHECK9:       omp.inner.for.cond:
10525 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
10526 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
10527 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
10528 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10529 // CHECK9:       omp.inner.for.body:
10530 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
10531 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
10532 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
10533 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
10534 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !50
10535 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10536 // CHECK9:       omp.inner.for.inc:
10537 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
10538 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
10539 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
10540 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
10541 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
10542 // CHECK9:       omp.inner.for.end:
10543 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10544 // CHECK9:       omp.loop.exit:
10545 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10546 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
10547 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
10548 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10549 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
10550 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10551 // CHECK9:       .omp.final.then:
10552 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10553 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
10554 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10555 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10556 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10557 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
10558 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10559 // CHECK9:       .omp.final.done:
10560 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10561 // CHECK9:       omp.precond.end:
10562 // CHECK9-NEXT:    ret void
10563 //
10564 //
10565 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19
10566 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10567 // CHECK9-NEXT:  entry:
10568 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10569 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10570 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10571 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10572 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10573 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10574 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10575 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10576 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10577 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10578 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10579 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10580 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10581 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10582 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10583 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10584 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10585 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10586 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10587 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10588 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10589 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10590 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10591 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10592 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10593 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10594 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10595 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10596 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10597 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10598 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10599 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10600 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10601 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10602 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10603 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10604 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10605 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10606 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10607 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10608 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10609 // CHECK9:       omp.precond.then:
10610 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10611 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10612 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10613 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10614 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
10615 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10616 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
10617 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
10618 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
10619 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10620 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10621 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10622 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10623 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10624 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
10625 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
10626 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10627 // CHECK9:       omp.dispatch.cond:
10628 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10629 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
10630 // CHECK9-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
10631 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
10632 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10633 // CHECK9:       omp.dispatch.body:
10634 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10635 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
10636 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10637 // CHECK9:       omp.inner.for.cond:
10638 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10639 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
10640 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
10641 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10642 // CHECK9:       omp.inner.for.body:
10643 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10644 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
10645 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10646 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
10647 // CHECK9-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !53
10648 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
10649 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
10650 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
10651 // CHECK9-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !53
10652 // CHECK9-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !53
10653 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
10654 // CHECK9-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
10655 // CHECK9-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
10656 // CHECK9-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !53
10657 // CHECK9-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
10658 // CHECK9-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !53
10659 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
10660 // CHECK9-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
10661 // CHECK9-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
10662 // CHECK9-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !53
10663 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10664 // CHECK9:       omp.body.continue:
10665 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10666 // CHECK9:       omp.inner.for.inc:
10667 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10668 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
10669 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10670 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
10671 // CHECK9:       omp.inner.for.end:
10672 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10673 // CHECK9:       omp.dispatch.inc:
10674 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
10675 // CHECK9:       omp.dispatch.end:
10676 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10677 // CHECK9-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
10678 // CHECK9-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10679 // CHECK9:       .omp.final.then:
10680 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10681 // CHECK9-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
10682 // CHECK9-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
10683 // CHECK9-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
10684 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
10685 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
10686 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10687 // CHECK9:       .omp.final.done:
10688 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10689 // CHECK9:       omp.precond.end:
10690 // CHECK9-NEXT:    ret void
10691 //
10692 //
10693 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
10694 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
10695 // CHECK9-NEXT:  entry:
10696 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
10697 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10698 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
10699 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
10700 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
10701 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
10702 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10703 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
10704 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
10705 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
10706 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
10707 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10708 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10709 // CHECK9-NEXT:    ret void
10710 //
10711 //
10712 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..22
10713 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10714 // CHECK9-NEXT:  entry:
10715 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10716 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10717 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
10718 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10719 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10720 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10721 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10722 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10723 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10724 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10725 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10726 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10727 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10728 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10729 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10730 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10731 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10732 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10733 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10734 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10735 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10736 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
10737 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10738 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10739 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10740 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10741 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
10742 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10743 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
10744 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
10745 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
10746 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
10747 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
10748 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
10749 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10750 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10751 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
10752 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10753 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10754 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10755 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10756 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10757 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
10758 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10759 // CHECK9:       omp.precond.then:
10760 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10761 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10762 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
10763 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10764 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10765 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10766 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10767 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10768 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10769 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10770 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10771 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10772 // CHECK9:       cond.true:
10773 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10774 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10775 // CHECK9:       cond.false:
10776 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10777 // CHECK9-NEXT:    br label [[COND_END]]
10778 // CHECK9:       cond.end:
10779 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10780 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10781 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10782 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10783 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10784 // CHECK9:       omp.inner.for.cond:
10785 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
10786 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
10787 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10788 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10789 // CHECK9:       omp.inner.for.body:
10790 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56
10791 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
10792 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
10793 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
10794 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !56
10795 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10796 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !56
10797 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !56
10798 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !56
10799 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10800 // CHECK9:       omp.inner.for.inc:
10801 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
10802 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56
10803 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
10804 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
10805 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
10806 // CHECK9:       omp.inner.for.end:
10807 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10808 // CHECK9:       omp.loop.exit:
10809 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10810 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
10811 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
10812 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10813 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
10814 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10815 // CHECK9:       .omp.final.then:
10816 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10817 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
10818 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
10819 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
10820 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
10821 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
10822 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10823 // CHECK9:       .omp.final.done:
10824 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10825 // CHECK9:       omp.precond.end:
10826 // CHECK9-NEXT:    ret void
10827 //
10828 //
10829 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..23
10830 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
10831 // CHECK9-NEXT:  entry:
10832 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10833 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10834 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10835 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10836 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10837 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10838 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10839 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10840 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10841 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10842 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10843 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10844 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10845 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10846 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10847 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10848 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10849 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10850 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
10851 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10852 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10853 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10854 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10855 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10856 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10857 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10858 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10859 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10860 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10861 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10862 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10863 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10864 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10865 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10866 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10867 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10868 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10869 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10870 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10871 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10872 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10873 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10874 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10875 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10876 // CHECK9:       omp.precond.then:
10877 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10878 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10879 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10880 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10881 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
10882 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10883 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
10884 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
10885 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
10886 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10887 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10888 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
10889 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10890 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10891 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10892 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
10893 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
10894 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10895 // CHECK9:       omp.dispatch.cond:
10896 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10897 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
10898 // CHECK9-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
10899 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
10900 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10901 // CHECK9:       omp.dispatch.body:
10902 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10903 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
10904 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10905 // CHECK9:       omp.inner.for.cond:
10906 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10907 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59
10908 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
10909 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10910 // CHECK9:       omp.inner.for.body:
10911 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10912 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
10913 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10914 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !59
10915 // CHECK9-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !59
10916 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
10917 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
10918 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
10919 // CHECK9-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !59
10920 // CHECK9-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !59
10921 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
10922 // CHECK9-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
10923 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
10924 // CHECK9-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !59
10925 // CHECK9-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
10926 // CHECK9-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !59
10927 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
10928 // CHECK9-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
10929 // CHECK9-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
10930 // CHECK9-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !59
10931 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10932 // CHECK9:       omp.body.continue:
10933 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10934 // CHECK9:       omp.inner.for.inc:
10935 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10936 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
10937 // CHECK9-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10938 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
10939 // CHECK9:       omp.inner.for.end:
10940 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10941 // CHECK9:       omp.dispatch.inc:
10942 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
10943 // CHECK9:       omp.dispatch.end:
10944 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10945 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10946 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10947 // CHECK9:       .omp.final.then:
10948 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10949 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
10950 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
10951 // CHECK9-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
10952 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
10953 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
10954 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10955 // CHECK9:       .omp.final.done:
10956 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10957 // CHECK9:       omp.precond.end:
10958 // CHECK9-NEXT:    ret void
10959 //
10960 //
10961 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
10962 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] comdat {
10963 // CHECK9-NEXT:  entry:
10964 // CHECK9-NEXT:    [[A:%.*]] = alloca i32*, align 8
10965 // CHECK9-NEXT:    [[B:%.*]] = alloca i32*, align 8
10966 // CHECK9-NEXT:    [[C:%.*]] = alloca i32*, align 8
10967 // CHECK9-NEXT:    [[N:%.*]] = alloca i32, align 4
10968 // CHECK9-NEXT:    [[CH:%.*]] = alloca i32, align 4
10969 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10970 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
10971 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
10972 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
10973 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10974 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10975 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10976 // CHECK9-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
10977 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
10978 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
10979 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
10980 // CHECK9-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
10981 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
10982 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
10983 // CHECK9-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
10984 // CHECK9-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
10985 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
10986 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
10987 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
10988 // CHECK9-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
10989 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
10990 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
10991 // CHECK9-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
10992 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
10993 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
10994 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
10995 // CHECK9-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
10996 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
10997 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
10998 // CHECK9-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
10999 // CHECK9-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
11000 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
11001 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
11002 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
11003 // CHECK9-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
11004 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
11005 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
11006 // CHECK9-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
11007 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
11008 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
11009 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
11010 // CHECK9-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
11011 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
11012 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
11013 // CHECK9-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
11014 // CHECK9-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
11015 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
11016 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
11017 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
11018 // CHECK9-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
11019 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
11020 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
11021 // CHECK9-NEXT:    store i32 10000, i32* [[N]], align 4
11022 // CHECK9-NEXT:    store i32 100, i32* [[CH]], align 4
11023 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
11024 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11025 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
11026 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
11027 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 8
11028 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 8
11029 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
11030 // CHECK9-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11031 // CHECK9-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
11032 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
11033 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11034 // CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
11035 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
11036 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
11037 // CHECK9-NEXT:    store i8* null, i8** [[TMP9]], align 8
11038 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
11039 // CHECK9-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
11040 // CHECK9-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 8
11041 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11042 // CHECK9-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
11043 // CHECK9-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 8
11044 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
11045 // CHECK9-NEXT:    store i8* null, i8** [[TMP14]], align 8
11046 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
11047 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
11048 // CHECK9-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 8
11049 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
11050 // CHECK9-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
11051 // CHECK9-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 8
11052 // CHECK9-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
11053 // CHECK9-NEXT:    store i8* null, i8** [[TMP19]], align 8
11054 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
11055 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
11056 // CHECK9-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 8
11057 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
11058 // CHECK9-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
11059 // CHECK9-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 8
11060 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
11061 // CHECK9-NEXT:    store i8* null, i8** [[TMP24]], align 8
11062 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11063 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11064 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
11065 // CHECK9-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
11066 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11067 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
11068 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11069 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11070 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11071 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11072 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
11073 // CHECK9-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
11074 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
11075 // CHECK9-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11076 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11077 // CHECK9-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
11078 // CHECK9:       omp_offload.failed:
11079 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i64 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
11080 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
11081 // CHECK9:       omp_offload.cont:
11082 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
11083 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
11084 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
11085 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
11086 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 8
11087 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 8
11088 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 8
11089 // CHECK9-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
11090 // CHECK9-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
11091 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
11092 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
11093 // CHECK9-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
11094 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
11095 // CHECK9-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
11096 // CHECK9-NEXT:    store i8* null, i8** [[TMP42]], align 8
11097 // CHECK9-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
11098 // CHECK9-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
11099 // CHECK9-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 8
11100 // CHECK9-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
11101 // CHECK9-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
11102 // CHECK9-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 8
11103 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
11104 // CHECK9-NEXT:    store i8* null, i8** [[TMP47]], align 8
11105 // CHECK9-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
11106 // CHECK9-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
11107 // CHECK9-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 8
11108 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
11109 // CHECK9-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
11110 // CHECK9-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 8
11111 // CHECK9-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
11112 // CHECK9-NEXT:    store i8* null, i8** [[TMP52]], align 8
11113 // CHECK9-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
11114 // CHECK9-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
11115 // CHECK9-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 8
11116 // CHECK9-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
11117 // CHECK9-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
11118 // CHECK9-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 8
11119 // CHECK9-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
11120 // CHECK9-NEXT:    store i8* null, i8** [[TMP57]], align 8
11121 // CHECK9-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
11122 // CHECK9-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
11123 // CHECK9-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
11124 // CHECK9-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
11125 // CHECK9-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
11126 // CHECK9-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
11127 // CHECK9-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
11128 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
11129 // CHECK9-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
11130 // CHECK9-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
11131 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
11132 // CHECK9-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
11133 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
11134 // CHECK9-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11135 // CHECK9-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
11136 // CHECK9-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
11137 // CHECK9:       omp_offload.failed15:
11138 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i64 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
11139 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
11140 // CHECK9:       omp_offload.cont16:
11141 // CHECK9-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
11142 // CHECK9-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
11143 // CHECK9-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
11144 // CHECK9-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
11145 // CHECK9-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
11146 // CHECK9-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
11147 // CHECK9-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
11148 // CHECK9-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
11149 // CHECK9-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 8
11150 // CHECK9-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 8
11151 // CHECK9-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 8
11152 // CHECK9-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
11153 // CHECK9-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
11154 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
11155 // CHECK9-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
11156 // CHECK9-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
11157 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
11158 // CHECK9-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
11159 // CHECK9-NEXT:    store i8* null, i8** [[TMP77]], align 8
11160 // CHECK9-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
11161 // CHECK9-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
11162 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
11163 // CHECK9-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
11164 // CHECK9-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
11165 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
11166 // CHECK9-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
11167 // CHECK9-NEXT:    store i8* null, i8** [[TMP82]], align 8
11168 // CHECK9-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
11169 // CHECK9-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
11170 // CHECK9-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 8
11171 // CHECK9-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
11172 // CHECK9-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
11173 // CHECK9-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 8
11174 // CHECK9-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
11175 // CHECK9-NEXT:    store i8* null, i8** [[TMP87]], align 8
11176 // CHECK9-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
11177 // CHECK9-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
11178 // CHECK9-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 8
11179 // CHECK9-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
11180 // CHECK9-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
11181 // CHECK9-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 8
11182 // CHECK9-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
11183 // CHECK9-NEXT:    store i8* null, i8** [[TMP92]], align 8
11184 // CHECK9-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
11185 // CHECK9-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
11186 // CHECK9-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 8
11187 // CHECK9-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
11188 // CHECK9-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
11189 // CHECK9-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 8
11190 // CHECK9-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
11191 // CHECK9-NEXT:    store i8* null, i8** [[TMP97]], align 8
11192 // CHECK9-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
11193 // CHECK9-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
11194 // CHECK9-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
11195 // CHECK9-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
11196 // CHECK9-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
11197 // CHECK9-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
11198 // CHECK9-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
11199 // CHECK9-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
11200 // CHECK9-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
11201 // CHECK9-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
11202 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
11203 // CHECK9-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
11204 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
11205 // CHECK9-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11206 // CHECK9-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
11207 // CHECK9-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
11208 // CHECK9:       omp_offload.failed30:
11209 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i64 [[TMP67]], i64 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
11210 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
11211 // CHECK9:       omp_offload.cont31:
11212 // CHECK9-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
11213 // CHECK9-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
11214 // CHECK9-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
11215 // CHECK9-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
11216 // CHECK9-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 8
11217 // CHECK9-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 8
11218 // CHECK9-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 8
11219 // CHECK9-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
11220 // CHECK9-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
11221 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
11222 // CHECK9-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
11223 // CHECK9-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
11224 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
11225 // CHECK9-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
11226 // CHECK9-NEXT:    store i8* null, i8** [[TMP115]], align 8
11227 // CHECK9-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
11228 // CHECK9-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
11229 // CHECK9-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 8
11230 // CHECK9-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
11231 // CHECK9-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
11232 // CHECK9-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 8
11233 // CHECK9-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
11234 // CHECK9-NEXT:    store i8* null, i8** [[TMP120]], align 8
11235 // CHECK9-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
11236 // CHECK9-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
11237 // CHECK9-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 8
11238 // CHECK9-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
11239 // CHECK9-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
11240 // CHECK9-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 8
11241 // CHECK9-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
11242 // CHECK9-NEXT:    store i8* null, i8** [[TMP125]], align 8
11243 // CHECK9-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
11244 // CHECK9-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
11245 // CHECK9-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 8
11246 // CHECK9-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
11247 // CHECK9-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
11248 // CHECK9-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 8
11249 // CHECK9-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
11250 // CHECK9-NEXT:    store i8* null, i8** [[TMP130]], align 8
11251 // CHECK9-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
11252 // CHECK9-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
11253 // CHECK9-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
11254 // CHECK9-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
11255 // CHECK9-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
11256 // CHECK9-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
11257 // CHECK9-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
11258 // CHECK9-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
11259 // CHECK9-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
11260 // CHECK9-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
11261 // CHECK9-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
11262 // CHECK9-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
11263 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
11264 // CHECK9-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11265 // CHECK9-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
11266 // CHECK9-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
11267 // CHECK9:       omp_offload.failed44:
11268 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i64 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
11269 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
11270 // CHECK9:       omp_offload.cont45:
11271 // CHECK9-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
11272 // CHECK9-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
11273 // CHECK9-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
11274 // CHECK9-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
11275 // CHECK9-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
11276 // CHECK9-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
11277 // CHECK9-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
11278 // CHECK9-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
11279 // CHECK9-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 8
11280 // CHECK9-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 8
11281 // CHECK9-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 8
11282 // CHECK9-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
11283 // CHECK9-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
11284 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
11285 // CHECK9-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
11286 // CHECK9-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
11287 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
11288 // CHECK9-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
11289 // CHECK9-NEXT:    store i8* null, i8** [[TMP150]], align 8
11290 // CHECK9-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
11291 // CHECK9-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
11292 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
11293 // CHECK9-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
11294 // CHECK9-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
11295 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
11296 // CHECK9-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
11297 // CHECK9-NEXT:    store i8* null, i8** [[TMP155]], align 8
11298 // CHECK9-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
11299 // CHECK9-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
11300 // CHECK9-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 8
11301 // CHECK9-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
11302 // CHECK9-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
11303 // CHECK9-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 8
11304 // CHECK9-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
11305 // CHECK9-NEXT:    store i8* null, i8** [[TMP160]], align 8
11306 // CHECK9-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
11307 // CHECK9-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
11308 // CHECK9-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 8
11309 // CHECK9-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
11310 // CHECK9-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
11311 // CHECK9-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 8
11312 // CHECK9-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
11313 // CHECK9-NEXT:    store i8* null, i8** [[TMP165]], align 8
11314 // CHECK9-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
11315 // CHECK9-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
11316 // CHECK9-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 8
11317 // CHECK9-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
11318 // CHECK9-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
11319 // CHECK9-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 8
11320 // CHECK9-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
11321 // CHECK9-NEXT:    store i8* null, i8** [[TMP170]], align 8
11322 // CHECK9-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
11323 // CHECK9-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
11324 // CHECK9-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
11325 // CHECK9-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
11326 // CHECK9-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
11327 // CHECK9-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
11328 // CHECK9-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
11329 // CHECK9-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
11330 // CHECK9-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
11331 // CHECK9-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
11332 // CHECK9-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
11333 // CHECK9-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
11334 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
11335 // CHECK9-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11336 // CHECK9-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
11337 // CHECK9-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
11338 // CHECK9:       omp_offload.failed60:
11339 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i64 [[TMP140]], i64 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
11340 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
11341 // CHECK9:       omp_offload.cont61:
11342 // CHECK9-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
11343 // CHECK9-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
11344 // CHECK9-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
11345 // CHECK9-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
11346 // CHECK9-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 8
11347 // CHECK9-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 8
11348 // CHECK9-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 8
11349 // CHECK9-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
11350 // CHECK9-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
11351 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
11352 // CHECK9-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
11353 // CHECK9-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
11354 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
11355 // CHECK9-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
11356 // CHECK9-NEXT:    store i8* null, i8** [[TMP188]], align 8
11357 // CHECK9-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
11358 // CHECK9-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
11359 // CHECK9-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 8
11360 // CHECK9-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
11361 // CHECK9-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
11362 // CHECK9-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 8
11363 // CHECK9-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
11364 // CHECK9-NEXT:    store i8* null, i8** [[TMP193]], align 8
11365 // CHECK9-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
11366 // CHECK9-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
11367 // CHECK9-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 8
11368 // CHECK9-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
11369 // CHECK9-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
11370 // CHECK9-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 8
11371 // CHECK9-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
11372 // CHECK9-NEXT:    store i8* null, i8** [[TMP198]], align 8
11373 // CHECK9-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
11374 // CHECK9-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
11375 // CHECK9-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 8
11376 // CHECK9-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
11377 // CHECK9-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
11378 // CHECK9-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 8
11379 // CHECK9-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
11380 // CHECK9-NEXT:    store i8* null, i8** [[TMP203]], align 8
11381 // CHECK9-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
11382 // CHECK9-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
11383 // CHECK9-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
11384 // CHECK9-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
11385 // CHECK9-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
11386 // CHECK9-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
11387 // CHECK9-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
11388 // CHECK9-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
11389 // CHECK9-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
11390 // CHECK9-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
11391 // CHECK9-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
11392 // CHECK9-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
11393 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
11394 // CHECK9-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11395 // CHECK9-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
11396 // CHECK9-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
11397 // CHECK9:       omp_offload.failed74:
11398 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i64 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
11399 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
11400 // CHECK9:       omp_offload.cont75:
11401 // CHECK9-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
11402 // CHECK9-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
11403 // CHECK9-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
11404 // CHECK9-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
11405 // CHECK9-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
11406 // CHECK9-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
11407 // CHECK9-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
11408 // CHECK9-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
11409 // CHECK9-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 8
11410 // CHECK9-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 8
11411 // CHECK9-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 8
11412 // CHECK9-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
11413 // CHECK9-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
11414 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
11415 // CHECK9-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
11416 // CHECK9-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
11417 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
11418 // CHECK9-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
11419 // CHECK9-NEXT:    store i8* null, i8** [[TMP223]], align 8
11420 // CHECK9-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
11421 // CHECK9-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
11422 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
11423 // CHECK9-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
11424 // CHECK9-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
11425 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
11426 // CHECK9-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
11427 // CHECK9-NEXT:    store i8* null, i8** [[TMP228]], align 8
11428 // CHECK9-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
11429 // CHECK9-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
11430 // CHECK9-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 8
11431 // CHECK9-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
11432 // CHECK9-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
11433 // CHECK9-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 8
11434 // CHECK9-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
11435 // CHECK9-NEXT:    store i8* null, i8** [[TMP233]], align 8
11436 // CHECK9-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
11437 // CHECK9-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
11438 // CHECK9-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 8
11439 // CHECK9-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
11440 // CHECK9-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
11441 // CHECK9-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 8
11442 // CHECK9-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
11443 // CHECK9-NEXT:    store i8* null, i8** [[TMP238]], align 8
11444 // CHECK9-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
11445 // CHECK9-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
11446 // CHECK9-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 8
11447 // CHECK9-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
11448 // CHECK9-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
11449 // CHECK9-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 8
11450 // CHECK9-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
11451 // CHECK9-NEXT:    store i8* null, i8** [[TMP243]], align 8
11452 // CHECK9-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
11453 // CHECK9-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
11454 // CHECK9-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
11455 // CHECK9-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
11456 // CHECK9-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
11457 // CHECK9-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
11458 // CHECK9-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
11459 // CHECK9-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
11460 // CHECK9-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
11461 // CHECK9-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
11462 // CHECK9-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
11463 // CHECK9-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
11464 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
11465 // CHECK9-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11466 // CHECK9-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
11467 // CHECK9-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
11468 // CHECK9:       omp_offload.failed90:
11469 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i64 [[TMP213]], i64 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
11470 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
11471 // CHECK9:       omp_offload.cont91:
11472 // CHECK9-NEXT:    ret i32 0
11473 //
11474 //
11475 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
11476 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
11477 // CHECK9-NEXT:  entry:
11478 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11479 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11480 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
11481 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
11482 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11483 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11484 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
11485 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
11486 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11487 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
11488 // CHECK9-NEXT:    ret void
11489 //
11490 //
11491 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..26
11492 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11493 // CHECK9-NEXT:  entry:
11494 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11495 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11496 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11497 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11498 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11499 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11500 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11501 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11502 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11503 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11504 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11505 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11506 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11507 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11508 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11509 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
11510 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11511 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11512 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11513 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11514 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11515 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11516 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11517 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11518 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11519 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11520 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11521 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11522 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11523 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11524 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11525 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11526 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11527 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11528 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11529 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11530 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11531 // CHECK9:       omp.precond.then:
11532 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11533 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11534 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11535 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11536 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11537 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11538 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11539 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11540 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11541 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11542 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11543 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11544 // CHECK9:       cond.true:
11545 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11546 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11547 // CHECK9:       cond.false:
11548 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11549 // CHECK9-NEXT:    br label [[COND_END]]
11550 // CHECK9:       cond.end:
11551 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11552 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11553 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11554 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11555 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11556 // CHECK9:       omp.inner.for.cond:
11557 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
11558 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
11559 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
11560 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11561 // CHECK9:       omp.inner.for.body:
11562 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !62
11563 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
11564 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
11565 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
11566 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !62
11567 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11568 // CHECK9:       omp.inner.for.inc:
11569 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
11570 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !62
11571 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
11572 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
11573 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
11574 // CHECK9:       omp.inner.for.end:
11575 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11576 // CHECK9:       omp.loop.exit:
11577 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11578 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
11579 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
11580 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11581 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
11582 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11583 // CHECK9:       .omp.final.then:
11584 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11585 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
11586 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11587 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11588 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11589 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
11590 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11591 // CHECK9:       .omp.final.done:
11592 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11593 // CHECK9:       omp.precond.end:
11594 // CHECK9-NEXT:    ret void
11595 //
11596 //
11597 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..27
11598 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11599 // CHECK9-NEXT:  entry:
11600 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11601 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11602 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11603 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11604 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11605 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11606 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11607 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11608 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11609 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11610 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11611 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11612 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11613 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11614 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11615 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11616 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11617 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
11618 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11619 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11620 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11621 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11622 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11623 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11624 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11625 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11626 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11627 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11628 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11629 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11630 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11631 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11632 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11633 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11634 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11635 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11636 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11637 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11638 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11639 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11640 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11641 // CHECK9:       omp.precond.then:
11642 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11643 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11644 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11645 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11646 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
11647 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11648 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
11649 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
11650 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
11651 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11652 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11653 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11654 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11655 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11656 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11657 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11658 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11659 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11660 // CHECK9:       cond.true:
11661 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11662 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11663 // CHECK9:       cond.false:
11664 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11665 // CHECK9-NEXT:    br label [[COND_END]]
11666 // CHECK9:       cond.end:
11667 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11668 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11669 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11670 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11671 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11672 // CHECK9:       omp.inner.for.cond:
11673 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11674 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !65
11675 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11676 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11677 // CHECK9:       omp.inner.for.body:
11678 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11679 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11680 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11681 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !65
11682 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !65
11683 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
11684 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
11685 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
11686 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !65
11687 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !65
11688 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
11689 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
11690 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
11691 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !65
11692 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
11693 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !65
11694 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
11695 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
11696 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
11697 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !65
11698 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11699 // CHECK9:       omp.body.continue:
11700 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11701 // CHECK9:       omp.inner.for.inc:
11702 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11703 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
11704 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11705 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
11706 // CHECK9:       omp.inner.for.end:
11707 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11708 // CHECK9:       omp.loop.exit:
11709 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11710 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
11711 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
11712 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11713 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11714 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11715 // CHECK9:       .omp.final.then:
11716 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11717 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
11718 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
11719 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
11720 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
11721 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
11722 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11723 // CHECK9:       .omp.final.done:
11724 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11725 // CHECK9:       omp.precond.end:
11726 // CHECK9-NEXT:    ret void
11727 //
11728 //
11729 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
11730 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
11731 // CHECK9-NEXT:  entry:
11732 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11733 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11734 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
11735 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
11736 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11737 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11738 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
11739 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
11740 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11741 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
11742 // CHECK9-NEXT:    ret void
11743 //
11744 //
11745 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..30
11746 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11747 // CHECK9-NEXT:  entry:
11748 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11749 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11750 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11751 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11752 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11753 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11754 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11755 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11756 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11757 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11758 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11759 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11760 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11761 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11762 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11763 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
11764 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11765 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11766 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11767 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11768 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11769 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11770 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11771 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11772 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11773 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11774 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11775 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11776 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11777 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11778 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11779 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11780 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11781 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11782 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11783 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11784 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11785 // CHECK9:       omp.precond.then:
11786 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11787 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11788 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11789 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11790 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11791 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11792 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11793 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11794 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11795 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11796 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11797 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11798 // CHECK9:       cond.true:
11799 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11800 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11801 // CHECK9:       cond.false:
11802 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11803 // CHECK9-NEXT:    br label [[COND_END]]
11804 // CHECK9:       cond.end:
11805 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11806 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11807 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11808 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11809 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11810 // CHECK9:       omp.inner.for.cond:
11811 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
11812 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
11813 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
11814 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11815 // CHECK9:       omp.inner.for.body:
11816 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !68
11817 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
11818 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
11819 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
11820 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !68
11821 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11822 // CHECK9:       omp.inner.for.inc:
11823 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
11824 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !68
11825 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
11826 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
11827 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
11828 // CHECK9:       omp.inner.for.end:
11829 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11830 // CHECK9:       omp.loop.exit:
11831 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11832 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
11833 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
11834 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11835 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
11836 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11837 // CHECK9:       .omp.final.then:
11838 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11839 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
11840 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11841 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11842 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11843 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
11844 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11845 // CHECK9:       .omp.final.done:
11846 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11847 // CHECK9:       omp.precond.end:
11848 // CHECK9-NEXT:    ret void
11849 //
11850 //
11851 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..31
11852 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11853 // CHECK9-NEXT:  entry:
11854 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11855 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11856 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11857 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11858 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11859 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11860 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11861 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11862 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11863 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11864 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11865 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11866 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11867 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11868 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11869 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11870 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11871 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
11872 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11873 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11874 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11875 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11876 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11877 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11878 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11879 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11880 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11881 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11882 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11883 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11884 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11885 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11886 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11887 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11888 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11889 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11890 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11891 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11892 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11893 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11894 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11895 // CHECK9:       omp.precond.then:
11896 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11897 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11898 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11899 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11900 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
11901 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11902 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
11903 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
11904 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
11905 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11906 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11907 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11908 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11909 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11910 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11911 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11912 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11913 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11914 // CHECK9:       cond.true:
11915 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11916 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11917 // CHECK9:       cond.false:
11918 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11919 // CHECK9-NEXT:    br label [[COND_END]]
11920 // CHECK9:       cond.end:
11921 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11922 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11923 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11924 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11925 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11926 // CHECK9:       omp.inner.for.cond:
11927 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11928 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !71
11929 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11930 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11931 // CHECK9:       omp.inner.for.body:
11932 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11933 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11934 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11935 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !71
11936 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !71
11937 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
11938 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
11939 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
11940 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !71
11941 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !71
11942 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
11943 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
11944 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
11945 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !71
11946 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
11947 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !71
11948 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
11949 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
11950 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
11951 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !71
11952 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11953 // CHECK9:       omp.body.continue:
11954 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11955 // CHECK9:       omp.inner.for.inc:
11956 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11957 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
11958 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11959 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
11960 // CHECK9:       omp.inner.for.end:
11961 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11962 // CHECK9:       omp.loop.exit:
11963 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11964 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
11965 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
11966 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11967 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11968 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11969 // CHECK9:       .omp.final.then:
11970 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11971 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
11972 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
11973 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
11974 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
11975 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
11976 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11977 // CHECK9:       .omp.final.done:
11978 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11979 // CHECK9:       omp.precond.end:
11980 // CHECK9-NEXT:    ret void
11981 //
11982 //
11983 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
11984 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
11985 // CHECK9-NEXT:  entry:
11986 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
11987 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11988 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11989 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
11990 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
11991 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
11992 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11993 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11994 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
11995 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
11996 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
11997 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11998 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
11999 // CHECK9-NEXT:    ret void
12000 //
12001 //
12002 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..34
12003 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12004 // CHECK9-NEXT:  entry:
12005 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12006 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12007 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
12008 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12009 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12010 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12011 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12012 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12013 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12014 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12015 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12016 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12017 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12018 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12019 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12020 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12021 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
12022 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12023 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12024 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
12025 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12026 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12027 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12028 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12029 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
12030 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12031 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12032 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12033 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12034 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
12035 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
12036 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12037 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
12038 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12039 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12040 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12041 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12042 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12043 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
12044 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12045 // CHECK9:       omp.precond.then:
12046 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12047 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12048 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
12049 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12050 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12051 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
12052 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12053 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12054 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
12055 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12056 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12057 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12058 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12059 // CHECK9:       cond.true:
12060 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12061 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12062 // CHECK9:       cond.false:
12063 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12064 // CHECK9-NEXT:    br label [[COND_END]]
12065 // CHECK9:       cond.end:
12066 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12067 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12068 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12069 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12070 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12071 // CHECK9:       omp.inner.for.cond:
12072 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12073 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
12074 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
12075 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
12076 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12077 // CHECK9:       omp.inner.for.body:
12078 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12079 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12080 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12081 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
12082 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !74
12083 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12084 // CHECK9:       omp.inner.for.inc:
12085 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12086 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
12087 // CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
12088 // CHECK9-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12089 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12090 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
12091 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
12092 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12093 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12094 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
12095 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
12096 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12097 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12098 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
12099 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
12100 // CHECK9-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
12101 // CHECK9:       cond.true10:
12102 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
12103 // CHECK9-NEXT:    br label [[COND_END12:%.*]]
12104 // CHECK9:       cond.false11:
12105 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12106 // CHECK9-NEXT:    br label [[COND_END12]]
12107 // CHECK9:       cond.end12:
12108 // CHECK9-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
12109 // CHECK9-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12110 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12111 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12112 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
12113 // CHECK9:       omp.inner.for.end:
12114 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12115 // CHECK9:       omp.loop.exit:
12116 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12117 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
12118 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
12119 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12120 // CHECK9-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
12121 // CHECK9-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12122 // CHECK9:       .omp.final.then:
12123 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12124 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
12125 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
12126 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
12127 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
12128 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
12129 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12130 // CHECK9:       .omp.final.done:
12131 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12132 // CHECK9:       omp.precond.end:
12133 // CHECK9-NEXT:    ret void
12134 //
12135 //
12136 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..35
12137 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12138 // CHECK9-NEXT:  entry:
12139 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12140 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12141 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12142 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12143 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12144 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12145 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12146 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12147 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12148 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12149 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12150 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12151 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12152 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12153 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12154 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12155 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12156 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12157 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12158 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12159 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12160 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12161 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12162 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12163 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12164 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12165 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12166 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12167 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12168 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12169 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12170 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12171 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12172 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12173 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12174 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12175 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12176 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12177 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12178 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12179 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12180 // CHECK9:       omp.precond.then:
12181 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12182 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12183 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12184 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12185 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
12186 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12187 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
12188 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
12189 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
12190 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12191 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12192 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12193 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12194 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12195 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12196 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12197 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12198 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12199 // CHECK9:       cond.true:
12200 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12201 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12202 // CHECK9:       cond.false:
12203 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12204 // CHECK9-NEXT:    br label [[COND_END]]
12205 // CHECK9:       cond.end:
12206 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12207 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12208 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12209 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12210 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12211 // CHECK9:       omp.inner.for.cond:
12212 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12213 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !77
12214 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12215 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12216 // CHECK9:       omp.inner.for.body:
12217 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12218 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12219 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12220 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !77
12221 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !77
12222 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
12223 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
12224 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
12225 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !77
12226 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !77
12227 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
12228 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
12229 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
12230 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !77
12231 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12232 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !77
12233 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
12234 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
12235 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
12236 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !77
12237 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12238 // CHECK9:       omp.body.continue:
12239 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12240 // CHECK9:       omp.inner.for.inc:
12241 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12242 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
12243 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12244 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP78:![0-9]+]]
12245 // CHECK9:       omp.inner.for.end:
12246 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12247 // CHECK9:       omp.loop.exit:
12248 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12249 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12250 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12251 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12252 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12253 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12254 // CHECK9:       .omp.final.then:
12255 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12256 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
12257 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
12258 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
12259 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
12260 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
12261 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12262 // CHECK9:       .omp.final.done:
12263 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12264 // CHECK9:       omp.precond.end:
12265 // CHECK9-NEXT:    ret void
12266 //
12267 //
12268 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
12269 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
12270 // CHECK9-NEXT:  entry:
12271 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12272 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12273 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
12274 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
12275 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12276 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12277 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
12278 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
12279 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12280 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12281 // CHECK9-NEXT:    ret void
12282 //
12283 //
12284 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..38
12285 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12286 // CHECK9-NEXT:  entry:
12287 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12288 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12289 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12290 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12291 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12292 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12293 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12294 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12295 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12296 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12297 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12298 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12299 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12300 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12301 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12302 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
12303 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12304 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12305 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12306 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12307 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12308 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12309 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12310 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12311 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12312 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12313 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12314 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12315 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12316 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12317 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12318 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12319 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12320 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12321 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12322 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12323 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12324 // CHECK9:       omp.precond.then:
12325 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12326 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12327 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
12328 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12329 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12330 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12331 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12332 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12333 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12334 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12335 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12336 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12337 // CHECK9:       cond.true:
12338 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12339 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12340 // CHECK9:       cond.false:
12341 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12342 // CHECK9-NEXT:    br label [[COND_END]]
12343 // CHECK9:       cond.end:
12344 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12345 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12346 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12347 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12348 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12349 // CHECK9:       omp.inner.for.cond:
12350 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
12351 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
12352 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12353 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12354 // CHECK9:       omp.inner.for.body:
12355 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !80
12356 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
12357 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
12358 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12359 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !80
12360 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12361 // CHECK9:       omp.inner.for.inc:
12362 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
12363 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !80
12364 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12365 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
12366 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP81:![0-9]+]]
12367 // CHECK9:       omp.inner.for.end:
12368 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12369 // CHECK9:       omp.loop.exit:
12370 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12371 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
12372 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
12373 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12374 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
12375 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12376 // CHECK9:       .omp.final.then:
12377 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12378 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
12379 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12380 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
12381 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
12382 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
12383 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12384 // CHECK9:       .omp.final.done:
12385 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12386 // CHECK9:       omp.precond.end:
12387 // CHECK9-NEXT:    ret void
12388 //
12389 //
12390 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..39
12391 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12392 // CHECK9-NEXT:  entry:
12393 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12394 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12395 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12396 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12397 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12398 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12399 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12400 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12401 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12402 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12403 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12404 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12405 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12406 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12407 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12408 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12409 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12410 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12411 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12412 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12413 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12414 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12415 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12416 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12417 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12418 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12419 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12420 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12421 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12422 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12423 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12424 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12425 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12426 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12427 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12428 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12429 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12430 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12431 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12432 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12433 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12434 // CHECK9:       omp.precond.then:
12435 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12436 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12437 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12438 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12439 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
12440 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12441 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
12442 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
12443 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
12444 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12445 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12446 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12447 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12448 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12449 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12450 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12451 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12452 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12453 // CHECK9:       cond.true:
12454 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12455 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12456 // CHECK9:       cond.false:
12457 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12458 // CHECK9-NEXT:    br label [[COND_END]]
12459 // CHECK9:       cond.end:
12460 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12461 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12462 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12463 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12464 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12465 // CHECK9:       omp.inner.for.cond:
12466 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12467 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !83
12468 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12469 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12470 // CHECK9:       omp.inner.for.body:
12471 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12472 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12473 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12474 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !83
12475 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !83
12476 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
12477 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
12478 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
12479 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !83
12480 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !83
12481 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
12482 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
12483 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
12484 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !83
12485 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12486 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !83
12487 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
12488 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
12489 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
12490 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !83
12491 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12492 // CHECK9:       omp.body.continue:
12493 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12494 // CHECK9:       omp.inner.for.inc:
12495 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12496 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
12497 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12498 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP84:![0-9]+]]
12499 // CHECK9:       omp.inner.for.end:
12500 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12501 // CHECK9:       omp.loop.exit:
12502 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12503 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12504 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12505 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12506 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12507 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12508 // CHECK9:       .omp.final.then:
12509 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12510 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
12511 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
12512 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
12513 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
12514 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
12515 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12516 // CHECK9:       .omp.final.done:
12517 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12518 // CHECK9:       omp.precond.end:
12519 // CHECK9-NEXT:    ret void
12520 //
12521 //
12522 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
12523 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
12524 // CHECK9-NEXT:  entry:
12525 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
12526 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12527 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12528 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
12529 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
12530 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
12531 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12532 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12533 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
12534 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
12535 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
12536 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12537 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12538 // CHECK9-NEXT:    ret void
12539 //
12540 //
12541 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..42
12542 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12543 // CHECK9-NEXT:  entry:
12544 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12545 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12546 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
12547 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12548 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12549 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12550 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12551 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12552 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12553 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12554 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12555 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12556 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12557 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12558 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12559 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12560 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12561 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12562 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
12563 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12564 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12565 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
12566 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12567 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12568 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12569 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12570 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
12571 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12572 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12573 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12574 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12575 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
12576 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
12577 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
12578 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12579 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12580 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
12581 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12582 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12583 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12584 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12585 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12586 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
12587 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12588 // CHECK9:       omp.precond.then:
12589 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12590 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12591 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
12592 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12593 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12594 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12595 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12596 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12597 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12598 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12599 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12600 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12601 // CHECK9:       cond.true:
12602 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12603 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12604 // CHECK9:       cond.false:
12605 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12606 // CHECK9-NEXT:    br label [[COND_END]]
12607 // CHECK9:       cond.end:
12608 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12609 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12610 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12611 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12612 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12613 // CHECK9:       omp.inner.for.cond:
12614 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
12615 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
12616 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12617 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12618 // CHECK9:       omp.inner.for.body:
12619 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !86
12620 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12621 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
12622 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
12623 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !86
12624 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
12625 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !86
12626 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !86
12627 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !86
12628 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12629 // CHECK9:       omp.inner.for.inc:
12630 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
12631 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !86
12632 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
12633 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
12634 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP87:![0-9]+]]
12635 // CHECK9:       omp.inner.for.end:
12636 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12637 // CHECK9:       omp.loop.exit:
12638 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12639 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
12640 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
12641 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12642 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
12643 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12644 // CHECK9:       .omp.final.then:
12645 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12646 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
12647 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
12648 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
12649 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
12650 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
12651 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12652 // CHECK9:       .omp.final.done:
12653 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12654 // CHECK9:       omp.precond.end:
12655 // CHECK9-NEXT:    ret void
12656 //
12657 //
12658 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..43
12659 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
12660 // CHECK9-NEXT:  entry:
12661 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12662 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12663 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12664 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12665 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12666 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12667 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12668 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12669 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
12670 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12671 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12672 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12673 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12674 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12675 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12676 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12677 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12678 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12679 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
12680 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12681 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12682 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12683 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12684 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12685 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12686 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12687 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12688 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
12689 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12690 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12691 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12692 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12693 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
12694 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12695 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12696 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12697 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12698 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12699 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12700 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12701 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12702 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12703 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12704 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12705 // CHECK9:       omp.precond.then:
12706 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12707 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12708 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12709 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12710 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
12711 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12712 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
12713 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
12714 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
12715 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12716 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12717 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
12718 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12719 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
12720 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
12721 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
12722 // CHECK9:       omp.dispatch.cond:
12723 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12724 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12725 // CHECK9-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
12726 // CHECK9-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
12727 // CHECK9-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12728 // CHECK9:       cond.true:
12729 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12730 // CHECK9-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
12731 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12732 // CHECK9:       cond.false:
12733 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12734 // CHECK9-NEXT:    br label [[COND_END]]
12735 // CHECK9:       cond.end:
12736 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
12737 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12738 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12739 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
12740 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12741 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12742 // CHECK9-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
12743 // CHECK9-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12744 // CHECK9:       omp.dispatch.body:
12745 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12746 // CHECK9:       omp.inner.for.cond:
12747 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12748 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !89
12749 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
12750 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12751 // CHECK9:       omp.inner.for.body:
12752 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12753 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
12754 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12755 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !89
12756 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !89
12757 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
12758 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
12759 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
12760 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !89
12761 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !89
12762 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
12763 // CHECK9-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
12764 // CHECK9-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM12]]
12765 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !89
12766 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
12767 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !89
12768 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
12769 // CHECK9-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
12770 // CHECK9-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM15]]
12771 // CHECK9-NEXT:    store i32 [[ADD14]], i32* [[ARRAYIDX16]], align 4, !llvm.access.group !89
12772 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12773 // CHECK9:       omp.body.continue:
12774 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12775 // CHECK9:       omp.inner.for.inc:
12776 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12777 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
12778 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12779 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP90:![0-9]+]]
12780 // CHECK9:       omp.inner.for.end:
12781 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
12782 // CHECK9:       omp.dispatch.inc:
12783 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12784 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12785 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
12786 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
12787 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12788 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12789 // CHECK9-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
12790 // CHECK9-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
12791 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
12792 // CHECK9:       omp.dispatch.end:
12793 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12794 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
12795 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
12796 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12797 // CHECK9-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
12798 // CHECK9-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12799 // CHECK9:       .omp.final.then:
12800 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12801 // CHECK9-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
12802 // CHECK9-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
12803 // CHECK9-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
12804 // CHECK9-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
12805 // CHECK9-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
12806 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12807 // CHECK9:       .omp.final.done:
12808 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12809 // CHECK9:       omp.precond.end:
12810 // CHECK9-NEXT:    ret void
12811 //
12812 //
12813 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
12814 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
12815 // CHECK9-NEXT:  entry:
12816 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12817 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12818 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
12819 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
12820 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12821 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12822 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
12823 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
12824 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12825 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12826 // CHECK9-NEXT:    ret void
12827 //
12828 //
12829 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..46
12830 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12831 // CHECK9-NEXT:  entry:
12832 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12833 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12834 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12835 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12836 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12837 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12838 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12839 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12840 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12841 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12842 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12843 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12844 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12845 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12846 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12847 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
12848 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12849 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12850 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12851 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12852 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12853 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12854 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12855 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12856 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12857 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12858 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12859 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12860 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12861 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12862 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12863 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12864 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12865 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12866 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12867 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12868 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12869 // CHECK9:       omp.precond.then:
12870 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12871 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12872 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
12873 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12874 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12875 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12876 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12877 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12878 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12879 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12880 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12881 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12882 // CHECK9:       cond.true:
12883 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12884 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12885 // CHECK9:       cond.false:
12886 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12887 // CHECK9-NEXT:    br label [[COND_END]]
12888 // CHECK9:       cond.end:
12889 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12890 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12891 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12892 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12893 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12894 // CHECK9:       omp.inner.for.cond:
12895 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
12896 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
12897 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12898 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12899 // CHECK9:       omp.inner.for.body:
12900 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !92
12901 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
12902 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
12903 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12904 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !92
12905 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12906 // CHECK9:       omp.inner.for.inc:
12907 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
12908 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !92
12909 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12910 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
12911 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP93:![0-9]+]]
12912 // CHECK9:       omp.inner.for.end:
12913 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12914 // CHECK9:       omp.loop.exit:
12915 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12916 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
12917 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
12918 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12919 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
12920 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12921 // CHECK9:       .omp.final.then:
12922 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12923 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
12924 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12925 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
12926 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
12927 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
12928 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12929 // CHECK9:       .omp.final.done:
12930 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12931 // CHECK9:       omp.precond.end:
12932 // CHECK9-NEXT:    ret void
12933 //
12934 //
12935 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..47
12936 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12937 // CHECK9-NEXT:  entry:
12938 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12939 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12940 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12941 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12942 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12943 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12944 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12945 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12946 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12947 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12948 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12949 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12950 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12951 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12952 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12953 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12954 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12955 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12956 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12957 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12958 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12959 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12960 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12961 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12962 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12963 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12964 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12965 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12966 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12967 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12968 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12969 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12970 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12971 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12972 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12973 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12974 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12975 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12976 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12977 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12978 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12979 // CHECK9:       omp.precond.then:
12980 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12981 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12982 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12983 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12984 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
12985 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12986 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
12987 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
12988 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
12989 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12990 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12991 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12992 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12993 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12994 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
12995 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
12996 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
12997 // CHECK9:       omp.dispatch.cond:
12998 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12999 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
13000 // CHECK9-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13001 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
13002 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13003 // CHECK9:       omp.dispatch.body:
13004 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13005 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
13006 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13007 // CHECK9:       omp.inner.for.cond:
13008 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13009 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !95
13010 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
13011 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13012 // CHECK9:       omp.inner.for.body:
13013 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13014 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
13015 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13016 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !95
13017 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !95
13018 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
13019 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
13020 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i64 [[IDXPROM]]
13021 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !95
13022 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !95
13023 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
13024 // CHECK9-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
13025 // CHECK9-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i64 [[IDXPROM6]]
13026 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !llvm.access.group !95
13027 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
13028 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !95
13029 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
13030 // CHECK9-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
13031 // CHECK9-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[IDXPROM9]]
13032 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !95
13033 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13034 // CHECK9:       omp.body.continue:
13035 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13036 // CHECK9:       omp.inner.for.inc:
13037 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13038 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
13039 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13040 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP96:![0-9]+]]
13041 // CHECK9:       omp.inner.for.end:
13042 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13043 // CHECK9:       omp.dispatch.inc:
13044 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
13045 // CHECK9:       omp.dispatch.end:
13046 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13047 // CHECK9-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
13048 // CHECK9-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13049 // CHECK9:       .omp.final.then:
13050 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13051 // CHECK9-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
13052 // CHECK9-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
13053 // CHECK9-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
13054 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
13055 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
13056 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13057 // CHECK9:       .omp.final.done:
13058 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
13059 // CHECK9:       omp.precond.end:
13060 // CHECK9-NEXT:    ret void
13061 //
13062 //
13063 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
13064 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
13065 // CHECK9-NEXT:  entry:
13066 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
13067 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
13068 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
13069 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
13070 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
13071 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
13072 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
13073 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
13074 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
13075 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
13076 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
13077 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
13078 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13079 // CHECK9-NEXT:    ret void
13080 //
13081 //
13082 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..50
13083 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
13084 // CHECK9-NEXT:  entry:
13085 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13086 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13087 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
13088 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13089 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
13090 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
13091 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
13092 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13093 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13094 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13095 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13096 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13097 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
13098 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13099 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13100 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13101 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13102 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
13103 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
13104 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13105 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13106 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
13107 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
13108 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
13109 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
13110 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
13111 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
13112 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
13113 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
13114 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
13115 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
13116 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
13117 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
13118 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
13119 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13120 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13121 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
13122 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13123 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13124 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13125 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
13126 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13127 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
13128 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13129 // CHECK9:       omp.precond.then:
13130 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13131 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13132 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
13133 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13134 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13135 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13136 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13137 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13138 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13139 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13140 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13141 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13142 // CHECK9:       cond.true:
13143 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13144 // CHECK9-NEXT:    br label [[COND_END:%.*]]
13145 // CHECK9:       cond.false:
13146 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13147 // CHECK9-NEXT:    br label [[COND_END]]
13148 // CHECK9:       cond.end:
13149 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13150 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13151 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13152 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13153 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13154 // CHECK9:       omp.inner.for.cond:
13155 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
13156 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
13157 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13158 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13159 // CHECK9:       omp.inner.for.body:
13160 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !98
13161 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
13162 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
13163 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
13164 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !98
13165 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
13166 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !98
13167 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !98
13168 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !98
13169 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13170 // CHECK9:       omp.inner.for.inc:
13171 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
13172 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !98
13173 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
13174 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
13175 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP99:![0-9]+]]
13176 // CHECK9:       omp.inner.for.end:
13177 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13178 // CHECK9:       omp.loop.exit:
13179 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13180 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
13181 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
13182 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13183 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
13184 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13185 // CHECK9:       .omp.final.then:
13186 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13187 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
13188 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
13189 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
13190 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
13191 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
13192 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13193 // CHECK9:       .omp.final.done:
13194 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
13195 // CHECK9:       omp.precond.end:
13196 // CHECK9-NEXT:    ret void
13197 //
13198 //
13199 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..51
13200 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
13201 // CHECK9-NEXT:  entry:
13202 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13203 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13204 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13205 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13206 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13207 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
13208 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
13209 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
13210 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13211 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13212 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13213 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13214 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13215 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
13216 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13217 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13218 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13219 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13220 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
13221 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13222 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13223 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13224 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13225 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
13226 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
13227 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
13228 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
13229 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13230 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
13231 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
13232 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
13233 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
13234 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13235 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13236 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13237 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13238 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13239 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13240 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13241 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13242 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
13243 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13244 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13245 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13246 // CHECK9:       omp.precond.then:
13247 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13248 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13249 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13250 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13251 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
13252 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13253 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
13254 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
13255 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
13256 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13257 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13258 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
13259 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13260 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13261 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13262 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
13263 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
13264 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13265 // CHECK9:       omp.dispatch.cond:
13266 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13267 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
13268 // CHECK9-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13269 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
13270 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13271 // CHECK9:       omp.dispatch.body:
13272 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13273 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
13274 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13275 // CHECK9:       omp.inner.for.cond:
13276 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13277 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !101
13278 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
13279 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13280 // CHECK9:       omp.inner.for.body:
13281 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13282 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
13283 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13284 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !101
13285 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !101
13286 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
13287 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
13288 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i64 [[IDXPROM]]
13289 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !101
13290 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !101
13291 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
13292 // CHECK9-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
13293 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i64 [[IDXPROM8]]
13294 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4, !llvm.access.group !101
13295 // CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
13296 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !101
13297 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
13298 // CHECK9-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
13299 // CHECK9-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i64 [[IDXPROM11]]
13300 // CHECK9-NEXT:    store i32 [[ADD10]], i32* [[ARRAYIDX12]], align 4, !llvm.access.group !101
13301 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13302 // CHECK9:       omp.body.continue:
13303 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13304 // CHECK9:       omp.inner.for.inc:
13305 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13306 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
13307 // CHECK9-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13308 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP102:![0-9]+]]
13309 // CHECK9:       omp.inner.for.end:
13310 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13311 // CHECK9:       omp.dispatch.inc:
13312 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
13313 // CHECK9:       omp.dispatch.end:
13314 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13315 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13316 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13317 // CHECK9:       .omp.final.then:
13318 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13319 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
13320 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
13321 // CHECK9-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
13322 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
13323 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
13324 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13325 // CHECK9:       .omp.final.done:
13326 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
13327 // CHECK9:       omp.precond.end:
13328 // CHECK9-NEXT:    ret void
13329 //
13330 //
13331 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
13332 // CHECK9-SAME: () #[[ATTR4:[0-9]+]] {
13333 // CHECK9-NEXT:  entry:
13334 // CHECK9-NEXT:    call void @__tgt_register_requires(i64 1)
13335 // CHECK9-NEXT:    ret void
13336 //
13337 //
13338 // CHECK10-LABEL: define {{[^@]+}}@main
13339 // CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
13340 // CHECK10-NEXT:  entry:
13341 // CHECK10-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
13342 // CHECK10-NEXT:    [[A:%.*]] = alloca double*, align 8
13343 // CHECK10-NEXT:    [[B:%.*]] = alloca double*, align 8
13344 // CHECK10-NEXT:    [[C:%.*]] = alloca double*, align 8
13345 // CHECK10-NEXT:    [[N:%.*]] = alloca i32, align 4
13346 // CHECK10-NEXT:    [[CH:%.*]] = alloca i32, align 4
13347 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
13348 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
13349 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
13350 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
13351 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13352 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13353 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13354 // CHECK10-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
13355 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
13356 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
13357 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
13358 // CHECK10-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
13359 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
13360 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
13361 // CHECK10-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
13362 // CHECK10-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
13363 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
13364 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
13365 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
13366 // CHECK10-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
13367 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
13368 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
13369 // CHECK10-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
13370 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
13371 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
13372 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
13373 // CHECK10-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
13374 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
13375 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
13376 // CHECK10-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
13377 // CHECK10-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
13378 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
13379 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
13380 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
13381 // CHECK10-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
13382 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
13383 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
13384 // CHECK10-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
13385 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
13386 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
13387 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
13388 // CHECK10-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
13389 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
13390 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
13391 // CHECK10-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
13392 // CHECK10-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
13393 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
13394 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
13395 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
13396 // CHECK10-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
13397 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
13398 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
13399 // CHECK10-NEXT:    store i32 0, i32* [[RETVAL]], align 4
13400 // CHECK10-NEXT:    store i32 10000, i32* [[N]], align 4
13401 // CHECK10-NEXT:    store i32 100, i32* [[CH]], align 4
13402 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
13403 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
13404 // CHECK10-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
13405 // CHECK10-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
13406 // CHECK10-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 8
13407 // CHECK10-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 8
13408 // CHECK10-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 8
13409 // CHECK10-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13410 // CHECK10-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
13411 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
13412 // CHECK10-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13413 // CHECK10-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
13414 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
13415 // CHECK10-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
13416 // CHECK10-NEXT:    store i8* null, i8** [[TMP9]], align 8
13417 // CHECK10-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
13418 // CHECK10-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
13419 // CHECK10-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 8
13420 // CHECK10-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
13421 // CHECK10-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
13422 // CHECK10-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 8
13423 // CHECK10-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
13424 // CHECK10-NEXT:    store i8* null, i8** [[TMP14]], align 8
13425 // CHECK10-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
13426 // CHECK10-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
13427 // CHECK10-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 8
13428 // CHECK10-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
13429 // CHECK10-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
13430 // CHECK10-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 8
13431 // CHECK10-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
13432 // CHECK10-NEXT:    store i8* null, i8** [[TMP19]], align 8
13433 // CHECK10-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
13434 // CHECK10-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
13435 // CHECK10-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 8
13436 // CHECK10-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
13437 // CHECK10-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
13438 // CHECK10-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 8
13439 // CHECK10-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
13440 // CHECK10-NEXT:    store i8* null, i8** [[TMP24]], align 8
13441 // CHECK10-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13442 // CHECK10-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13443 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
13444 // CHECK10-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
13445 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13446 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
13447 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13448 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13449 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13450 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13451 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
13452 // CHECK10-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
13453 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
13454 // CHECK10-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13455 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13456 // CHECK10-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13457 // CHECK10:       omp_offload.failed:
13458 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i64 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
13459 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT]]
13460 // CHECK10:       omp_offload.cont:
13461 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
13462 // CHECK10-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
13463 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
13464 // CHECK10-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
13465 // CHECK10-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 8
13466 // CHECK10-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 8
13467 // CHECK10-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 8
13468 // CHECK10-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
13469 // CHECK10-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
13470 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
13471 // CHECK10-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
13472 // CHECK10-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
13473 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
13474 // CHECK10-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
13475 // CHECK10-NEXT:    store i8* null, i8** [[TMP42]], align 8
13476 // CHECK10-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
13477 // CHECK10-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
13478 // CHECK10-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 8
13479 // CHECK10-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
13480 // CHECK10-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
13481 // CHECK10-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 8
13482 // CHECK10-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
13483 // CHECK10-NEXT:    store i8* null, i8** [[TMP47]], align 8
13484 // CHECK10-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
13485 // CHECK10-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
13486 // CHECK10-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 8
13487 // CHECK10-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
13488 // CHECK10-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
13489 // CHECK10-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 8
13490 // CHECK10-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
13491 // CHECK10-NEXT:    store i8* null, i8** [[TMP52]], align 8
13492 // CHECK10-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
13493 // CHECK10-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
13494 // CHECK10-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 8
13495 // CHECK10-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
13496 // CHECK10-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
13497 // CHECK10-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 8
13498 // CHECK10-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
13499 // CHECK10-NEXT:    store i8* null, i8** [[TMP57]], align 8
13500 // CHECK10-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
13501 // CHECK10-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
13502 // CHECK10-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
13503 // CHECK10-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
13504 // CHECK10-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
13505 // CHECK10-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
13506 // CHECK10-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
13507 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
13508 // CHECK10-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
13509 // CHECK10-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
13510 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
13511 // CHECK10-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
13512 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
13513 // CHECK10-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13514 // CHECK10-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
13515 // CHECK10-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
13516 // CHECK10:       omp_offload.failed15:
13517 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i64 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
13518 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
13519 // CHECK10:       omp_offload.cont16:
13520 // CHECK10-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
13521 // CHECK10-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
13522 // CHECK10-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
13523 // CHECK10-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
13524 // CHECK10-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
13525 // CHECK10-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
13526 // CHECK10-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
13527 // CHECK10-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
13528 // CHECK10-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 8
13529 // CHECK10-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 8
13530 // CHECK10-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 8
13531 // CHECK10-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
13532 // CHECK10-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
13533 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
13534 // CHECK10-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
13535 // CHECK10-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
13536 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
13537 // CHECK10-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
13538 // CHECK10-NEXT:    store i8* null, i8** [[TMP77]], align 8
13539 // CHECK10-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
13540 // CHECK10-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
13541 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
13542 // CHECK10-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
13543 // CHECK10-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
13544 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
13545 // CHECK10-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
13546 // CHECK10-NEXT:    store i8* null, i8** [[TMP82]], align 8
13547 // CHECK10-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
13548 // CHECK10-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
13549 // CHECK10-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 8
13550 // CHECK10-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
13551 // CHECK10-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
13552 // CHECK10-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 8
13553 // CHECK10-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
13554 // CHECK10-NEXT:    store i8* null, i8** [[TMP87]], align 8
13555 // CHECK10-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
13556 // CHECK10-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
13557 // CHECK10-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 8
13558 // CHECK10-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
13559 // CHECK10-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
13560 // CHECK10-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 8
13561 // CHECK10-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
13562 // CHECK10-NEXT:    store i8* null, i8** [[TMP92]], align 8
13563 // CHECK10-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
13564 // CHECK10-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
13565 // CHECK10-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 8
13566 // CHECK10-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
13567 // CHECK10-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
13568 // CHECK10-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 8
13569 // CHECK10-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
13570 // CHECK10-NEXT:    store i8* null, i8** [[TMP97]], align 8
13571 // CHECK10-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
13572 // CHECK10-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
13573 // CHECK10-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
13574 // CHECK10-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
13575 // CHECK10-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
13576 // CHECK10-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
13577 // CHECK10-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
13578 // CHECK10-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
13579 // CHECK10-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
13580 // CHECK10-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
13581 // CHECK10-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
13582 // CHECK10-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
13583 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
13584 // CHECK10-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13585 // CHECK10-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
13586 // CHECK10-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
13587 // CHECK10:       omp_offload.failed30:
13588 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i64 [[TMP67]], i64 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
13589 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
13590 // CHECK10:       omp_offload.cont31:
13591 // CHECK10-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
13592 // CHECK10-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
13593 // CHECK10-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
13594 // CHECK10-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
13595 // CHECK10-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 8
13596 // CHECK10-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 8
13597 // CHECK10-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 8
13598 // CHECK10-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13599 // CHECK10-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
13600 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
13601 // CHECK10-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13602 // CHECK10-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
13603 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
13604 // CHECK10-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
13605 // CHECK10-NEXT:    store i8* null, i8** [[TMP115]], align 8
13606 // CHECK10-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
13607 // CHECK10-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
13608 // CHECK10-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 8
13609 // CHECK10-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
13610 // CHECK10-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
13611 // CHECK10-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 8
13612 // CHECK10-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
13613 // CHECK10-NEXT:    store i8* null, i8** [[TMP120]], align 8
13614 // CHECK10-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
13615 // CHECK10-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
13616 // CHECK10-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 8
13617 // CHECK10-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
13618 // CHECK10-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
13619 // CHECK10-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 8
13620 // CHECK10-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
13621 // CHECK10-NEXT:    store i8* null, i8** [[TMP125]], align 8
13622 // CHECK10-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
13623 // CHECK10-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
13624 // CHECK10-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 8
13625 // CHECK10-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
13626 // CHECK10-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
13627 // CHECK10-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 8
13628 // CHECK10-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
13629 // CHECK10-NEXT:    store i8* null, i8** [[TMP130]], align 8
13630 // CHECK10-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13631 // CHECK10-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13632 // CHECK10-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
13633 // CHECK10-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
13634 // CHECK10-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
13635 // CHECK10-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
13636 // CHECK10-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
13637 // CHECK10-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
13638 // CHECK10-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
13639 // CHECK10-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
13640 // CHECK10-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
13641 // CHECK10-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
13642 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
13643 // CHECK10-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13644 // CHECK10-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
13645 // CHECK10-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
13646 // CHECK10:       omp_offload.failed44:
13647 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i64 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
13648 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
13649 // CHECK10:       omp_offload.cont45:
13650 // CHECK10-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
13651 // CHECK10-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
13652 // CHECK10-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
13653 // CHECK10-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
13654 // CHECK10-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
13655 // CHECK10-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
13656 // CHECK10-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
13657 // CHECK10-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
13658 // CHECK10-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 8
13659 // CHECK10-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 8
13660 // CHECK10-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 8
13661 // CHECK10-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13662 // CHECK10-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
13663 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
13664 // CHECK10-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13665 // CHECK10-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
13666 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
13667 // CHECK10-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
13668 // CHECK10-NEXT:    store i8* null, i8** [[TMP150]], align 8
13669 // CHECK10-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
13670 // CHECK10-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
13671 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
13672 // CHECK10-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
13673 // CHECK10-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
13674 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
13675 // CHECK10-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
13676 // CHECK10-NEXT:    store i8* null, i8** [[TMP155]], align 8
13677 // CHECK10-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
13678 // CHECK10-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
13679 // CHECK10-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 8
13680 // CHECK10-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
13681 // CHECK10-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
13682 // CHECK10-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 8
13683 // CHECK10-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
13684 // CHECK10-NEXT:    store i8* null, i8** [[TMP160]], align 8
13685 // CHECK10-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
13686 // CHECK10-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
13687 // CHECK10-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 8
13688 // CHECK10-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
13689 // CHECK10-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
13690 // CHECK10-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 8
13691 // CHECK10-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
13692 // CHECK10-NEXT:    store i8* null, i8** [[TMP165]], align 8
13693 // CHECK10-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
13694 // CHECK10-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
13695 // CHECK10-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 8
13696 // CHECK10-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
13697 // CHECK10-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
13698 // CHECK10-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 8
13699 // CHECK10-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
13700 // CHECK10-NEXT:    store i8* null, i8** [[TMP170]], align 8
13701 // CHECK10-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13702 // CHECK10-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13703 // CHECK10-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
13704 // CHECK10-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
13705 // CHECK10-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
13706 // CHECK10-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
13707 // CHECK10-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
13708 // CHECK10-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
13709 // CHECK10-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
13710 // CHECK10-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
13711 // CHECK10-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
13712 // CHECK10-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
13713 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
13714 // CHECK10-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13715 // CHECK10-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
13716 // CHECK10-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
13717 // CHECK10:       omp_offload.failed60:
13718 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i64 [[TMP140]], i64 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
13719 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
13720 // CHECK10:       omp_offload.cont61:
13721 // CHECK10-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
13722 // CHECK10-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
13723 // CHECK10-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
13724 // CHECK10-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
13725 // CHECK10-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 8
13726 // CHECK10-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 8
13727 // CHECK10-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 8
13728 // CHECK10-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
13729 // CHECK10-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
13730 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
13731 // CHECK10-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
13732 // CHECK10-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
13733 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
13734 // CHECK10-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
13735 // CHECK10-NEXT:    store i8* null, i8** [[TMP188]], align 8
13736 // CHECK10-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
13737 // CHECK10-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
13738 // CHECK10-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 8
13739 // CHECK10-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
13740 // CHECK10-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
13741 // CHECK10-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 8
13742 // CHECK10-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
13743 // CHECK10-NEXT:    store i8* null, i8** [[TMP193]], align 8
13744 // CHECK10-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
13745 // CHECK10-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
13746 // CHECK10-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 8
13747 // CHECK10-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
13748 // CHECK10-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
13749 // CHECK10-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 8
13750 // CHECK10-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
13751 // CHECK10-NEXT:    store i8* null, i8** [[TMP198]], align 8
13752 // CHECK10-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
13753 // CHECK10-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
13754 // CHECK10-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 8
13755 // CHECK10-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
13756 // CHECK10-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
13757 // CHECK10-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 8
13758 // CHECK10-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
13759 // CHECK10-NEXT:    store i8* null, i8** [[TMP203]], align 8
13760 // CHECK10-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
13761 // CHECK10-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
13762 // CHECK10-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
13763 // CHECK10-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
13764 // CHECK10-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
13765 // CHECK10-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
13766 // CHECK10-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
13767 // CHECK10-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
13768 // CHECK10-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
13769 // CHECK10-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
13770 // CHECK10-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
13771 // CHECK10-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
13772 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
13773 // CHECK10-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13774 // CHECK10-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
13775 // CHECK10-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
13776 // CHECK10:       omp_offload.failed74:
13777 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i64 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
13778 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
13779 // CHECK10:       omp_offload.cont75:
13780 // CHECK10-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
13781 // CHECK10-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
13782 // CHECK10-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
13783 // CHECK10-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
13784 // CHECK10-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
13785 // CHECK10-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
13786 // CHECK10-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
13787 // CHECK10-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
13788 // CHECK10-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 8
13789 // CHECK10-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 8
13790 // CHECK10-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 8
13791 // CHECK10-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
13792 // CHECK10-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
13793 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
13794 // CHECK10-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
13795 // CHECK10-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
13796 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
13797 // CHECK10-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
13798 // CHECK10-NEXT:    store i8* null, i8** [[TMP223]], align 8
13799 // CHECK10-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
13800 // CHECK10-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
13801 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
13802 // CHECK10-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
13803 // CHECK10-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
13804 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
13805 // CHECK10-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
13806 // CHECK10-NEXT:    store i8* null, i8** [[TMP228]], align 8
13807 // CHECK10-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
13808 // CHECK10-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
13809 // CHECK10-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 8
13810 // CHECK10-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
13811 // CHECK10-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
13812 // CHECK10-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 8
13813 // CHECK10-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
13814 // CHECK10-NEXT:    store i8* null, i8** [[TMP233]], align 8
13815 // CHECK10-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
13816 // CHECK10-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
13817 // CHECK10-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 8
13818 // CHECK10-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
13819 // CHECK10-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
13820 // CHECK10-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 8
13821 // CHECK10-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
13822 // CHECK10-NEXT:    store i8* null, i8** [[TMP238]], align 8
13823 // CHECK10-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
13824 // CHECK10-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
13825 // CHECK10-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 8
13826 // CHECK10-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
13827 // CHECK10-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
13828 // CHECK10-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 8
13829 // CHECK10-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
13830 // CHECK10-NEXT:    store i8* null, i8** [[TMP243]], align 8
13831 // CHECK10-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
13832 // CHECK10-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
13833 // CHECK10-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
13834 // CHECK10-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
13835 // CHECK10-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
13836 // CHECK10-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
13837 // CHECK10-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
13838 // CHECK10-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
13839 // CHECK10-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
13840 // CHECK10-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
13841 // CHECK10-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
13842 // CHECK10-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
13843 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
13844 // CHECK10-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13845 // CHECK10-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
13846 // CHECK10-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
13847 // CHECK10:       omp_offload.failed90:
13848 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i64 [[TMP213]], i64 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
13849 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
13850 // CHECK10:       omp_offload.cont91:
13851 // CHECK10-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
13852 // CHECK10-NEXT:    ret i32 [[CALL]]
13853 //
13854 //
13855 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
13856 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
13857 // CHECK10-NEXT:  entry:
13858 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
13859 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
13860 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
13861 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
13862 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
13863 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
13864 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
13865 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
13866 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
13867 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
13868 // CHECK10-NEXT:    ret void
13869 //
13870 //
13871 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
13872 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
13873 // CHECK10-NEXT:  entry:
13874 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13875 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13876 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13877 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
13878 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
13879 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
13880 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13881 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13882 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13883 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13884 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
13885 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13886 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13887 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13888 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13889 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
13890 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13891 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13892 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
13893 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
13894 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
13895 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
13896 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
13897 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
13898 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
13899 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
13900 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13901 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13902 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13903 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13904 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13905 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13906 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13907 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
13908 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13909 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13910 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13911 // CHECK10:       omp.precond.then:
13912 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13913 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13914 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
13915 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13916 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13917 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13918 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13919 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13920 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13921 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13922 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
13923 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13924 // CHECK10:       cond.true:
13925 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13926 // CHECK10-NEXT:    br label [[COND_END:%.*]]
13927 // CHECK10:       cond.false:
13928 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13929 // CHECK10-NEXT:    br label [[COND_END]]
13930 // CHECK10:       cond.end:
13931 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
13932 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13933 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13934 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
13935 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13936 // CHECK10:       omp.inner.for.cond:
13937 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
13938 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
13939 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
13940 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13941 // CHECK10:       omp.inner.for.body:
13942 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
13943 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
13944 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
13945 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
13946 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !17
13947 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13948 // CHECK10:       omp.inner.for.inc:
13949 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
13950 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
13951 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
13952 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
13953 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
13954 // CHECK10:       omp.inner.for.end:
13955 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13956 // CHECK10:       omp.loop.exit:
13957 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13958 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
13959 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
13960 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13961 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
13962 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13963 // CHECK10:       .omp.final.then:
13964 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13965 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
13966 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13967 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13968 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13969 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
13970 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13971 // CHECK10:       .omp.final.done:
13972 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
13973 // CHECK10:       omp.precond.end:
13974 // CHECK10-NEXT:    ret void
13975 //
13976 //
13977 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1
13978 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
13979 // CHECK10-NEXT:  entry:
13980 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13981 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13982 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13983 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13984 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13985 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
13986 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
13987 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
13988 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13989 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13990 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13991 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13992 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
13993 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13994 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13995 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13996 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13997 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
13998 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13999 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14000 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14001 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14002 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14003 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14004 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14005 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14006 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14007 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14008 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14009 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14010 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14011 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14012 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14013 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14014 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14015 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14016 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14017 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14018 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14019 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14020 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14021 // CHECK10:       omp.precond.then:
14022 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14023 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14024 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14025 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14026 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14027 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14028 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14029 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14030 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14031 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14032 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14033 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14034 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14035 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14036 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14037 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14038 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14039 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14040 // CHECK10:       cond.true:
14041 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14042 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14043 // CHECK10:       cond.false:
14044 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14045 // CHECK10-NEXT:    br label [[COND_END]]
14046 // CHECK10:       cond.end:
14047 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14048 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14049 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14050 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14051 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14052 // CHECK10:       omp.inner.for.cond:
14053 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14054 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
14055 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14056 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14057 // CHECK10:       omp.inner.for.body:
14058 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14059 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14060 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14061 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !21
14062 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !21
14063 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
14064 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14065 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14066 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !21
14067 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !21
14068 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
14069 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14070 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14071 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !21
14072 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14073 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !21
14074 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
14075 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14076 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14077 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !21
14078 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14079 // CHECK10:       omp.body.continue:
14080 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14081 // CHECK10:       omp.inner.for.inc:
14082 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14083 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14084 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14085 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
14086 // CHECK10:       omp.inner.for.end:
14087 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14088 // CHECK10:       omp.loop.exit:
14089 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14090 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14091 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14092 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14093 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14094 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14095 // CHECK10:       .omp.final.then:
14096 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14097 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14098 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14099 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14100 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14101 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14102 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14103 // CHECK10:       .omp.final.done:
14104 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14105 // CHECK10:       omp.precond.end:
14106 // CHECK10-NEXT:    ret void
14107 //
14108 //
14109 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
14110 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14111 // CHECK10-NEXT:  entry:
14112 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14113 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14114 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14115 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14116 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14117 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14118 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14119 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14120 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14121 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14122 // CHECK10-NEXT:    ret void
14123 //
14124 //
14125 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..2
14126 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14127 // CHECK10-NEXT:  entry:
14128 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14129 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14130 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14131 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14132 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14133 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14134 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14135 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14136 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14137 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14138 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14139 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14140 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14141 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14142 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14143 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
14144 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14145 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14146 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14147 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14148 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14149 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14150 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14151 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14152 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14153 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14154 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14155 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14156 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14157 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14158 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14159 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14160 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14161 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14162 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14163 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14164 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14165 // CHECK10:       omp.precond.then:
14166 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14167 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14168 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
14169 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14170 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14171 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14172 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14173 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14174 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14175 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14176 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14177 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14178 // CHECK10:       cond.true:
14179 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14180 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14181 // CHECK10:       cond.false:
14182 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14183 // CHECK10-NEXT:    br label [[COND_END]]
14184 // CHECK10:       cond.end:
14185 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14186 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14187 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14188 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
14189 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14190 // CHECK10:       omp.inner.for.cond:
14191 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14192 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
14193 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14194 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14195 // CHECK10:       omp.inner.for.body:
14196 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
14197 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
14198 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
14199 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
14200 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !26
14201 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14202 // CHECK10:       omp.inner.for.inc:
14203 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14204 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
14205 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
14206 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14207 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
14208 // CHECK10:       omp.inner.for.end:
14209 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14210 // CHECK10:       omp.loop.exit:
14211 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14212 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
14213 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
14214 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14215 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
14216 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14217 // CHECK10:       .omp.final.then:
14218 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14219 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
14220 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14221 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14222 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14223 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
14224 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14225 // CHECK10:       .omp.final.done:
14226 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14227 // CHECK10:       omp.precond.end:
14228 // CHECK10-NEXT:    ret void
14229 //
14230 //
14231 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3
14232 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14233 // CHECK10-NEXT:  entry:
14234 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14235 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14236 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
14237 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
14238 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14239 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14240 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14241 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14242 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14243 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14244 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14245 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14246 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14247 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14248 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14249 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14250 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14251 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14252 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14253 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14254 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14255 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14256 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14257 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14258 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14259 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14260 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14261 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14262 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14263 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14264 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14265 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14266 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14267 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14268 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14269 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14270 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14271 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14272 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14273 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14274 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14275 // CHECK10:       omp.precond.then:
14276 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14277 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14278 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14279 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14280 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14281 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14282 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14283 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14284 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14285 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14286 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14287 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14288 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14289 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14290 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14291 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14292 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14293 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14294 // CHECK10:       cond.true:
14295 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14296 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14297 // CHECK10:       cond.false:
14298 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14299 // CHECK10-NEXT:    br label [[COND_END]]
14300 // CHECK10:       cond.end:
14301 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14302 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14303 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14304 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14305 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14306 // CHECK10:       omp.inner.for.cond:
14307 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14308 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
14309 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14310 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14311 // CHECK10:       omp.inner.for.body:
14312 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14313 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14314 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14315 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !29
14316 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !29
14317 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
14318 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14319 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14320 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !29
14321 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !29
14322 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
14323 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14324 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14325 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !29
14326 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14327 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !29
14328 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
14329 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14330 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14331 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !29
14332 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14333 // CHECK10:       omp.body.continue:
14334 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14335 // CHECK10:       omp.inner.for.inc:
14336 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14337 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14338 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14339 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
14340 // CHECK10:       omp.inner.for.end:
14341 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14342 // CHECK10:       omp.loop.exit:
14343 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14344 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14345 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14346 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14347 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14348 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14349 // CHECK10:       .omp.final.then:
14350 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14351 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14352 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14353 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14354 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14355 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14356 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14357 // CHECK10:       .omp.final.done:
14358 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14359 // CHECK10:       omp.precond.end:
14360 // CHECK10-NEXT:    ret void
14361 //
14362 //
14363 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
14364 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14365 // CHECK10-NEXT:  entry:
14366 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
14367 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14368 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14369 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14370 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14371 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
14372 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14373 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14374 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14375 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14376 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
14377 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14378 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14379 // CHECK10-NEXT:    ret void
14380 //
14381 //
14382 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6
14383 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14384 // CHECK10-NEXT:  entry:
14385 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14386 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14387 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
14388 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14389 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14390 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14391 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14392 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14393 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14394 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14395 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14396 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14397 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14398 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14399 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14400 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14401 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
14402 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14403 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14404 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
14405 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14406 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14407 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14408 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14409 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
14410 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14411 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
14412 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
14413 // CHECK10-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
14414 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
14415 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
14416 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14417 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
14418 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14419 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14420 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14421 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14422 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14423 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
14424 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14425 // CHECK10:       omp.precond.then:
14426 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14427 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14428 // CHECK10-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
14429 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14430 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14431 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
14432 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14433 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14434 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
14435 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14436 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14437 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14438 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14439 // CHECK10:       cond.true:
14440 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14441 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14442 // CHECK10:       cond.false:
14443 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14444 // CHECK10-NEXT:    br label [[COND_END]]
14445 // CHECK10:       cond.end:
14446 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14447 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14448 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14449 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14450 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14451 // CHECK10:       omp.inner.for.cond:
14452 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14453 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
14454 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
14455 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
14456 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14457 // CHECK10:       omp.inner.for.body:
14458 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14459 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
14460 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14461 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
14462 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !32
14463 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14464 // CHECK10:       omp.inner.for.inc:
14465 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14466 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
14467 // CHECK10-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
14468 // CHECK10-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14469 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14470 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
14471 // CHECK10-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
14472 // CHECK10-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14473 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14474 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
14475 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
14476 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14477 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14478 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
14479 // CHECK10-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
14480 // CHECK10-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
14481 // CHECK10:       cond.true10:
14482 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
14483 // CHECK10-NEXT:    br label [[COND_END12:%.*]]
14484 // CHECK10:       cond.false11:
14485 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14486 // CHECK10-NEXT:    br label [[COND_END12]]
14487 // CHECK10:       cond.end12:
14488 // CHECK10-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
14489 // CHECK10-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14490 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14491 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14492 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
14493 // CHECK10:       omp.inner.for.end:
14494 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14495 // CHECK10:       omp.loop.exit:
14496 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14497 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
14498 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
14499 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14500 // CHECK10-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
14501 // CHECK10-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14502 // CHECK10:       .omp.final.then:
14503 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14504 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
14505 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
14506 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
14507 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
14508 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
14509 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14510 // CHECK10:       .omp.final.done:
14511 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14512 // CHECK10:       omp.precond.end:
14513 // CHECK10-NEXT:    ret void
14514 //
14515 //
14516 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7
14517 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14518 // CHECK10-NEXT:  entry:
14519 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14520 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14521 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
14522 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
14523 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14524 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14525 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14526 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14527 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14528 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14529 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14530 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14531 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14532 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14533 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14534 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14535 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14536 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14537 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14538 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14539 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14540 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14541 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14542 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14543 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14544 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14545 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14546 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14547 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14548 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14549 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14550 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14551 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14552 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14553 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14554 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14555 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14556 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14557 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14558 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14559 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14560 // CHECK10:       omp.precond.then:
14561 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14562 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14563 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14564 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14565 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14566 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14567 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14568 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14569 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14570 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14571 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14572 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14573 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14574 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14575 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14576 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14577 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14578 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14579 // CHECK10:       cond.true:
14580 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14581 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14582 // CHECK10:       cond.false:
14583 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14584 // CHECK10-NEXT:    br label [[COND_END]]
14585 // CHECK10:       cond.end:
14586 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14587 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14588 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14589 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14590 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14591 // CHECK10:       omp.inner.for.cond:
14592 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14593 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
14594 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14595 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14596 // CHECK10:       omp.inner.for.body:
14597 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14598 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14599 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14600 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !35
14601 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !35
14602 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
14603 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14604 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14605 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !35
14606 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !35
14607 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
14608 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14609 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14610 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !35
14611 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14612 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !35
14613 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
14614 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14615 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14616 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !35
14617 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14618 // CHECK10:       omp.body.continue:
14619 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14620 // CHECK10:       omp.inner.for.inc:
14621 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14622 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14623 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14624 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
14625 // CHECK10:       omp.inner.for.end:
14626 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14627 // CHECK10:       omp.loop.exit:
14628 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14629 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14630 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14631 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14632 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14633 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14634 // CHECK10:       .omp.final.then:
14635 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14636 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14637 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14638 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14639 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14640 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14641 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14642 // CHECK10:       .omp.final.done:
14643 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14644 // CHECK10:       omp.precond.end:
14645 // CHECK10-NEXT:    ret void
14646 //
14647 //
14648 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
14649 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14650 // CHECK10-NEXT:  entry:
14651 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14652 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14653 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14654 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14655 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14656 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14657 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14658 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14659 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14660 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14661 // CHECK10-NEXT:    ret void
14662 //
14663 //
14664 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10
14665 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14666 // CHECK10-NEXT:  entry:
14667 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14668 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14669 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14670 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14671 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14672 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14673 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14674 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14675 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14676 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14677 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14678 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14679 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14680 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14681 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14682 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
14683 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14684 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14685 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14686 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14687 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14688 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14689 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14690 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14691 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14692 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14693 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14694 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14695 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14696 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14697 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14698 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14699 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14700 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14701 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14702 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14703 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14704 // CHECK10:       omp.precond.then:
14705 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14706 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14707 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
14708 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14709 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14710 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14711 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14712 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14713 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14714 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14715 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14716 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14717 // CHECK10:       cond.true:
14718 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14719 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14720 // CHECK10:       cond.false:
14721 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14722 // CHECK10-NEXT:    br label [[COND_END]]
14723 // CHECK10:       cond.end:
14724 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14725 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14726 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14727 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
14728 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14729 // CHECK10:       omp.inner.for.cond:
14730 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
14731 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
14732 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14733 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14734 // CHECK10:       omp.inner.for.body:
14735 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
14736 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
14737 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
14738 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
14739 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !38
14740 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14741 // CHECK10:       omp.inner.for.inc:
14742 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
14743 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
14744 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
14745 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
14746 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
14747 // CHECK10:       omp.inner.for.end:
14748 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14749 // CHECK10:       omp.loop.exit:
14750 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14751 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
14752 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
14753 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14754 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
14755 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14756 // CHECK10:       .omp.final.then:
14757 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14758 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
14759 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14760 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14761 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14762 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
14763 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14764 // CHECK10:       .omp.final.done:
14765 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14766 // CHECK10:       omp.precond.end:
14767 // CHECK10-NEXT:    ret void
14768 //
14769 //
14770 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11
14771 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14772 // CHECK10-NEXT:  entry:
14773 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14774 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14775 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
14776 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
14777 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14778 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14779 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14780 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14781 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14782 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14783 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14784 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14785 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14786 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14787 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14788 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14789 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14790 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14791 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14792 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14793 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14794 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14795 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14796 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14797 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14798 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14799 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14800 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14801 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14802 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14803 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14804 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14805 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14806 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14807 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14808 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14809 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14810 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14811 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14812 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14813 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14814 // CHECK10:       omp.precond.then:
14815 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14816 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14817 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14818 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14819 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14820 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14821 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14822 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14823 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14824 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14825 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14826 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14827 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14828 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14829 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14830 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14831 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14832 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14833 // CHECK10:       cond.true:
14834 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14835 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14836 // CHECK10:       cond.false:
14837 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14838 // CHECK10-NEXT:    br label [[COND_END]]
14839 // CHECK10:       cond.end:
14840 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14841 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14842 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14843 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14844 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14845 // CHECK10:       omp.inner.for.cond:
14846 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14847 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
14848 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14849 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14850 // CHECK10:       omp.inner.for.body:
14851 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14852 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14853 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14854 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
14855 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !41
14856 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
14857 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14858 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14859 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !41
14860 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !41
14861 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
14862 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14863 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14864 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !41
14865 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14866 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !41
14867 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
14868 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14869 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14870 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !41
14871 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14872 // CHECK10:       omp.body.continue:
14873 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14874 // CHECK10:       omp.inner.for.inc:
14875 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14876 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14877 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14878 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
14879 // CHECK10:       omp.inner.for.end:
14880 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14881 // CHECK10:       omp.loop.exit:
14882 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14883 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14884 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14885 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14886 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14887 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14888 // CHECK10:       .omp.final.then:
14889 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14890 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14891 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14892 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14893 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14894 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14895 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14896 // CHECK10:       .omp.final.done:
14897 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14898 // CHECK10:       omp.precond.end:
14899 // CHECK10-NEXT:    ret void
14900 //
14901 //
14902 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
14903 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14904 // CHECK10-NEXT:  entry:
14905 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
14906 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14907 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14908 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14909 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14910 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
14911 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14912 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14913 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14914 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14915 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
14916 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14917 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14918 // CHECK10-NEXT:    ret void
14919 //
14920 //
14921 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..14
14922 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14923 // CHECK10-NEXT:  entry:
14924 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14925 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14926 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
14927 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14928 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14929 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14930 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14931 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14932 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14933 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14934 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14935 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14936 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14937 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14938 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14939 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14940 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14941 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14942 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
14943 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14944 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14945 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
14946 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14947 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14948 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14949 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14950 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
14951 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14952 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
14953 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
14954 // CHECK10-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
14955 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
14956 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
14957 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
14958 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14959 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14960 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
14961 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14962 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14963 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14964 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14965 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14966 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
14967 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14968 // CHECK10:       omp.precond.then:
14969 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14970 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14971 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
14972 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14973 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14974 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14975 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14976 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14977 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14978 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14979 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14980 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14981 // CHECK10:       cond.true:
14982 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14983 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14984 // CHECK10:       cond.false:
14985 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14986 // CHECK10-NEXT:    br label [[COND_END]]
14987 // CHECK10:       cond.end:
14988 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14989 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14990 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14991 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14992 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14993 // CHECK10:       omp.inner.for.cond:
14994 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
14995 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
14996 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14997 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14998 // CHECK10:       omp.inner.for.body:
14999 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
15000 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
15001 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
15002 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
15003 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
15004 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
15005 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !44
15006 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !44
15007 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !44
15008 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15009 // CHECK10:       omp.inner.for.inc:
15010 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15011 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
15012 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
15013 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15014 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
15015 // CHECK10:       omp.inner.for.end:
15016 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15017 // CHECK10:       omp.loop.exit:
15018 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15019 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
15020 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
15021 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15022 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
15023 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15024 // CHECK10:       .omp.final.then:
15025 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15026 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
15027 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15028 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
15029 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
15030 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
15031 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15032 // CHECK10:       .omp.final.done:
15033 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15034 // CHECK10:       omp.precond.end:
15035 // CHECK10-NEXT:    ret void
15036 //
15037 //
15038 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15
15039 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
15040 // CHECK10-NEXT:  entry:
15041 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15042 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15043 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
15044 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
15045 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15046 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15047 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15048 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15049 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
15050 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15051 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15052 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15053 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15054 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15055 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15056 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15057 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15058 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15059 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
15060 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15061 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15062 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15063 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15064 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15065 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15066 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15067 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15068 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
15069 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15070 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15071 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15072 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15073 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
15074 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15075 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15076 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15077 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15078 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15079 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15080 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15081 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15082 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15083 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15084 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15085 // CHECK10:       omp.precond.then:
15086 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15087 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15088 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15089 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15090 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
15091 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15092 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
15093 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
15094 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
15095 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15096 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15097 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
15098 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15099 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
15100 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
15101 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15102 // CHECK10:       omp.dispatch.cond:
15103 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15104 // CHECK10-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15105 // CHECK10-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
15106 // CHECK10-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
15107 // CHECK10-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15108 // CHECK10:       cond.true:
15109 // CHECK10-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15110 // CHECK10-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
15111 // CHECK10-NEXT:    br label [[COND_END:%.*]]
15112 // CHECK10:       cond.false:
15113 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15114 // CHECK10-NEXT:    br label [[COND_END]]
15115 // CHECK10:       cond.end:
15116 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
15117 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
15118 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15119 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
15120 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15121 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15122 // CHECK10-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
15123 // CHECK10-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15124 // CHECK10:       omp.dispatch.body:
15125 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15126 // CHECK10:       omp.inner.for.cond:
15127 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15128 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
15129 // CHECK10-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
15130 // CHECK10-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15131 // CHECK10:       omp.inner.for.body:
15132 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15133 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
15134 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15135 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !47
15136 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !47
15137 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
15138 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
15139 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
15140 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !47
15141 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !47
15142 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
15143 // CHECK10-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
15144 // CHECK10-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
15145 // CHECK10-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !47
15146 // CHECK10-NEXT:    [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
15147 // CHECK10-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !47
15148 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
15149 // CHECK10-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
15150 // CHECK10-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
15151 // CHECK10-NEXT:    store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !47
15152 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15153 // CHECK10:       omp.body.continue:
15154 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15155 // CHECK10:       omp.inner.for.inc:
15156 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15157 // CHECK10-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
15158 // CHECK10-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15159 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
15160 // CHECK10:       omp.inner.for.end:
15161 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15162 // CHECK10:       omp.dispatch.inc:
15163 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15164 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15165 // CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
15166 // CHECK10-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
15167 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15168 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15169 // CHECK10-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
15170 // CHECK10-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
15171 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
15172 // CHECK10:       omp.dispatch.end:
15173 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15174 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
15175 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
15176 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15177 // CHECK10-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
15178 // CHECK10-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15179 // CHECK10:       .omp.final.then:
15180 // CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15181 // CHECK10-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
15182 // CHECK10-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
15183 // CHECK10-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
15184 // CHECK10-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
15185 // CHECK10-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
15186 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15187 // CHECK10:       .omp.final.done:
15188 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15189 // CHECK10:       omp.precond.end:
15190 // CHECK10-NEXT:    ret void
15191 //
15192 //
15193 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
15194 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
15195 // CHECK10-NEXT:  entry:
15196 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
15197 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
15198 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
15199 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
15200 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
15201 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
15202 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
15203 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
15204 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
15205 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
15206 // CHECK10-NEXT:    ret void
15207 //
15208 //
15209 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..18
15210 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
15211 // CHECK10-NEXT:  entry:
15212 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15213 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15214 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15215 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15216 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15217 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15218 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15219 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15220 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15221 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15222 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15223 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15224 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15225 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15226 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15227 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
15228 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15229 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15230 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15231 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15232 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15233 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15234 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15235 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15236 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15237 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15238 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15239 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
15240 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15241 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15242 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15243 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15244 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15245 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15246 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15247 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15248 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15249 // CHECK10:       omp.precond.then:
15250 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15251 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15252 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
15253 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15254 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15255 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15256 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
15257 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15258 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15259 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15260 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
15261 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15262 // CHECK10:       cond.true:
15263 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15264 // CHECK10-NEXT:    br label [[COND_END:%.*]]
15265 // CHECK10:       cond.false:
15266 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15267 // CHECK10-NEXT:    br label [[COND_END]]
15268 // CHECK10:       cond.end:
15269 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
15270 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15271 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15272 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
15273 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15274 // CHECK10:       omp.inner.for.cond:
15275 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
15276 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
15277 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
15278 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15279 // CHECK10:       omp.inner.for.body:
15280 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
15281 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
15282 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
15283 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
15284 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !50
15285 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15286 // CHECK10:       omp.inner.for.inc:
15287 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
15288 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
15289 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
15290 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
15291 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
15292 // CHECK10:       omp.inner.for.end:
15293 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15294 // CHECK10:       omp.loop.exit:
15295 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15296 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
15297 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
15298 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15299 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
15300 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15301 // CHECK10:       .omp.final.then:
15302 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15303 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
15304 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
15305 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
15306 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
15307 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
15308 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15309 // CHECK10:       .omp.final.done:
15310 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15311 // CHECK10:       omp.precond.end:
15312 // CHECK10-NEXT:    ret void
15313 //
15314 //
15315 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..19
15316 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
15317 // CHECK10-NEXT:  entry:
15318 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15319 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15320 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
15321 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
15322 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15323 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15324 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15325 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15326 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15327 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15328 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15329 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15330 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15331 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15332 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15333 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15334 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15335 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
15336 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15337 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15338 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15339 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15340 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15341 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15342 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15343 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15344 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15345 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15346 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15347 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15348 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15349 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
15350 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15351 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15352 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15353 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15354 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15355 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15356 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15357 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15358 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15359 // CHECK10:       omp.precond.then:
15360 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15361 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15362 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15363 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15364 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
15365 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15366 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
15367 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
15368 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
15369 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15370 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15371 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15372 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15373 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15374 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
15375 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
15376 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15377 // CHECK10:       omp.dispatch.cond:
15378 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15379 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
15380 // CHECK10-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
15381 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
15382 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15383 // CHECK10:       omp.dispatch.body:
15384 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15385 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
15386 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15387 // CHECK10:       omp.inner.for.cond:
15388 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15389 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
15390 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
15391 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15392 // CHECK10:       omp.inner.for.body:
15393 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15394 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
15395 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15396 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
15397 // CHECK10-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !53
15398 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
15399 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
15400 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
15401 // CHECK10-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !53
15402 // CHECK10-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !53
15403 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
15404 // CHECK10-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
15405 // CHECK10-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
15406 // CHECK10-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !53
15407 // CHECK10-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
15408 // CHECK10-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !53
15409 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
15410 // CHECK10-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
15411 // CHECK10-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
15412 // CHECK10-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !53
15413 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15414 // CHECK10:       omp.body.continue:
15415 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15416 // CHECK10:       omp.inner.for.inc:
15417 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15418 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
15419 // CHECK10-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15420 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
15421 // CHECK10:       omp.inner.for.end:
15422 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15423 // CHECK10:       omp.dispatch.inc:
15424 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
15425 // CHECK10:       omp.dispatch.end:
15426 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15427 // CHECK10-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
15428 // CHECK10-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15429 // CHECK10:       .omp.final.then:
15430 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15431 // CHECK10-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
15432 // CHECK10-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
15433 // CHECK10-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
15434 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
15435 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
15436 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15437 // CHECK10:       .omp.final.done:
15438 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15439 // CHECK10:       omp.precond.end:
15440 // CHECK10-NEXT:    ret void
15441 //
15442 //
15443 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
15444 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
15445 // CHECK10-NEXT:  entry:
15446 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
15447 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
15448 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
15449 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
15450 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
15451 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
15452 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
15453 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
15454 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
15455 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
15456 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
15457 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
15458 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
15459 // CHECK10-NEXT:    ret void
15460 //
15461 //
15462 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..22
15463 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
15464 // CHECK10-NEXT:  entry:
15465 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15466 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15467 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
15468 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15469 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15470 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15471 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15472 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15473 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15474 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15475 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15476 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15477 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15478 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15479 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15480 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15481 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15482 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
15483 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
15484 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15485 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15486 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
15487 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15488 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15489 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15490 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15491 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
15492 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15493 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
15494 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
15495 // CHECK10-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
15496 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
15497 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
15498 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
15499 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15500 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15501 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
15502 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15503 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15504 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15505 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15506 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15507 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
15508 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15509 // CHECK10:       omp.precond.then:
15510 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15511 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15512 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
15513 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15514 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15515 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15516 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
15517 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15518 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15519 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15520 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
15521 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15522 // CHECK10:       cond.true:
15523 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15524 // CHECK10-NEXT:    br label [[COND_END:%.*]]
15525 // CHECK10:       cond.false:
15526 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15527 // CHECK10-NEXT:    br label [[COND_END]]
15528 // CHECK10:       cond.end:
15529 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
15530 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15531 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15532 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
15533 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15534 // CHECK10:       omp.inner.for.cond:
15535 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
15536 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
15537 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
15538 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15539 // CHECK10:       omp.inner.for.body:
15540 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56
15541 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
15542 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
15543 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
15544 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !56
15545 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
15546 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !56
15547 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !56
15548 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !56
15549 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15550 // CHECK10:       omp.inner.for.inc:
15551 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
15552 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56
15553 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
15554 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
15555 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
15556 // CHECK10:       omp.inner.for.end:
15557 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15558 // CHECK10:       omp.loop.exit:
15559 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15560 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
15561 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
15562 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15563 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
15564 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15565 // CHECK10:       .omp.final.then:
15566 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15567 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
15568 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15569 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
15570 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
15571 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
15572 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15573 // CHECK10:       .omp.final.done:
15574 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15575 // CHECK10:       omp.precond.end:
15576 // CHECK10-NEXT:    ret void
15577 //
15578 //
15579 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..23
15580 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
15581 // CHECK10-NEXT:  entry:
15582 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15583 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15584 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
15585 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
15586 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15587 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15588 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15589 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15590 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
15591 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15592 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15593 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15594 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15595 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15596 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15597 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15598 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15599 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15600 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
15601 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15602 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15603 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15604 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15605 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15606 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15607 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15608 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15609 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
15610 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15611 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15612 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15613 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15614 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
15615 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15616 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15617 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15618 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15619 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15620 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15621 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15622 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15623 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15624 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15625 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15626 // CHECK10:       omp.precond.then:
15627 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15628 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15629 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15630 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15631 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
15632 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15633 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
15634 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
15635 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
15636 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15637 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15638 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
15639 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15640 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15641 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15642 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
15643 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
15644 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15645 // CHECK10:       omp.dispatch.cond:
15646 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15647 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
15648 // CHECK10-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
15649 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
15650 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15651 // CHECK10:       omp.dispatch.body:
15652 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15653 // CHECK10-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
15654 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15655 // CHECK10:       omp.inner.for.cond:
15656 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15657 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59
15658 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
15659 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15660 // CHECK10:       omp.inner.for.body:
15661 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15662 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
15663 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15664 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !59
15665 // CHECK10-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !59
15666 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
15667 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
15668 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
15669 // CHECK10-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !59
15670 // CHECK10-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !59
15671 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
15672 // CHECK10-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
15673 // CHECK10-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
15674 // CHECK10-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !59
15675 // CHECK10-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
15676 // CHECK10-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !59
15677 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
15678 // CHECK10-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
15679 // CHECK10-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
15680 // CHECK10-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !59
15681 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15682 // CHECK10:       omp.body.continue:
15683 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15684 // CHECK10:       omp.inner.for.inc:
15685 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15686 // CHECK10-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
15687 // CHECK10-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15688 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
15689 // CHECK10:       omp.inner.for.end:
15690 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15691 // CHECK10:       omp.dispatch.inc:
15692 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
15693 // CHECK10:       omp.dispatch.end:
15694 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15695 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
15696 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15697 // CHECK10:       .omp.final.then:
15698 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15699 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
15700 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
15701 // CHECK10-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
15702 // CHECK10-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
15703 // CHECK10-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
15704 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15705 // CHECK10:       .omp.final.done:
15706 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15707 // CHECK10:       omp.precond.end:
15708 // CHECK10-NEXT:    ret void
15709 //
15710 //
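// The assertions that follow cover the host-side code generated for the template
// instantiation _Z5tmainIiET_v (tmain<int>()): it initializes n = 10000 and ch = 100,
// marshals the offload arguments (the baseptrs/ptrs/mappers arrays) for seven target
// regions (source lines 42, 50, 58, 66, 74, 82 and 90), pushes each trip count via
// __kmpc_push_target_tripcount_mapper, launches each region through
// __tgt_target_teams_mapper, falls back to the host entry point when a launch fails,
// and returns 0. Three of the regions (lines 58, 74 and 90) additionally pass the
// chunk variable 'ch'.
//
// A hedged sketch (not the original test source; the exact pragmas and clauses are
// assumptions inferred from the generated host code) of the kind of template these
// assertions correspond to:
//
//   template <typename T>
//   T tmain() {
//     T *a, *b, *c;
//     int n = 10000;
//     int ch = 100;
//     #pragma omp target teams distribute parallel for
//     for (int i = 0; i < n; ++i)
//       a[i] = b[i] + c[i];
//     // ...further target regions of the same shape, some presumably using 'ch'
//     // in a schedule or dist_schedule clause...
//     return T();
//   }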
15711 // CHECK10-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
15712 // CHECK10-SAME: () #[[ATTR3:[0-9]+]] comdat {
15713 // CHECK10-NEXT:  entry:
15714 // CHECK10-NEXT:    [[A:%.*]] = alloca i32*, align 8
15715 // CHECK10-NEXT:    [[B:%.*]] = alloca i32*, align 8
15716 // CHECK10-NEXT:    [[C:%.*]] = alloca i32*, align 8
15717 // CHECK10-NEXT:    [[N:%.*]] = alloca i32, align 4
15718 // CHECK10-NEXT:    [[CH:%.*]] = alloca i32, align 4
15719 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
15720 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
15721 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
15722 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
15723 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15724 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15725 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15726 // CHECK10-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
15727 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
15728 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
15729 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
15730 // CHECK10-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
15731 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
15732 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
15733 // CHECK10-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
15734 // CHECK10-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
15735 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
15736 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
15737 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
15738 // CHECK10-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
15739 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
15740 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
15741 // CHECK10-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
15742 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
15743 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
15744 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
15745 // CHECK10-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
15746 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
15747 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
15748 // CHECK10-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
15749 // CHECK10-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
15750 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
15751 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
15752 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
15753 // CHECK10-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
15754 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
15755 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
15756 // CHECK10-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
15757 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
15758 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
15759 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
15760 // CHECK10-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
15761 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
15762 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
15763 // CHECK10-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
15764 // CHECK10-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
15765 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
15766 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
15767 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
15768 // CHECK10-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
15769 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
15770 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
15771 // CHECK10-NEXT:    store i32 10000, i32* [[N]], align 4
15772 // CHECK10-NEXT:    store i32 100, i32* [[CH]], align 4
15773 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
15774 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
15775 // CHECK10-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
15776 // CHECK10-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
15777 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 8
15778 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 8
15779 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
15780 // CHECK10-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15781 // CHECK10-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
15782 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
15783 // CHECK10-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15784 // CHECK10-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
15785 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
15786 // CHECK10-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
15787 // CHECK10-NEXT:    store i8* null, i8** [[TMP9]], align 8
15788 // CHECK10-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
15789 // CHECK10-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
15790 // CHECK10-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 8
15791 // CHECK10-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
15792 // CHECK10-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
15793 // CHECK10-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 8
15794 // CHECK10-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
15795 // CHECK10-NEXT:    store i8* null, i8** [[TMP14]], align 8
15796 // CHECK10-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
15797 // CHECK10-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
15798 // CHECK10-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 8
15799 // CHECK10-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
15800 // CHECK10-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
15801 // CHECK10-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 8
15802 // CHECK10-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
15803 // CHECK10-NEXT:    store i8* null, i8** [[TMP19]], align 8
15804 // CHECK10-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
15805 // CHECK10-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
15806 // CHECK10-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 8
15807 // CHECK10-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
15808 // CHECK10-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
15809 // CHECK10-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 8
15810 // CHECK10-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
15811 // CHECK10-NEXT:    store i8* null, i8** [[TMP24]], align 8
15812 // CHECK10-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15813 // CHECK10-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15814 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
15815 // CHECK10-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
15816 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15817 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
15818 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15819 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15820 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15821 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15822 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
15823 // CHECK10-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
15824 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
15825 // CHECK10-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
15826 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
15827 // CHECK10-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
15828 // CHECK10:       omp_offload.failed:
15829 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i64 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
15830 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT]]
15831 // CHECK10:       omp_offload.cont:
15832 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
15833 // CHECK10-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
15834 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
15835 // CHECK10-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
15836 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 8
15837 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 8
15838 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 8
15839 // CHECK10-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
15840 // CHECK10-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
15841 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
15842 // CHECK10-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
15843 // CHECK10-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
15844 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
15845 // CHECK10-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
15846 // CHECK10-NEXT:    store i8* null, i8** [[TMP42]], align 8
15847 // CHECK10-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
15848 // CHECK10-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
15849 // CHECK10-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 8
15850 // CHECK10-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
15851 // CHECK10-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
15852 // CHECK10-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 8
15853 // CHECK10-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
15854 // CHECK10-NEXT:    store i8* null, i8** [[TMP47]], align 8
15855 // CHECK10-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
15856 // CHECK10-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
15857 // CHECK10-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 8
15858 // CHECK10-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
15859 // CHECK10-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
15860 // CHECK10-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 8
15861 // CHECK10-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
15862 // CHECK10-NEXT:    store i8* null, i8** [[TMP52]], align 8
15863 // CHECK10-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
15864 // CHECK10-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
15865 // CHECK10-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 8
15866 // CHECK10-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
15867 // CHECK10-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
15868 // CHECK10-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 8
15869 // CHECK10-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
15870 // CHECK10-NEXT:    store i8* null, i8** [[TMP57]], align 8
15871 // CHECK10-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
15872 // CHECK10-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
15873 // CHECK10-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
15874 // CHECK10-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
15875 // CHECK10-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
15876 // CHECK10-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
15877 // CHECK10-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
15878 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
15879 // CHECK10-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
15880 // CHECK10-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
15881 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
15882 // CHECK10-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
15883 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
15884 // CHECK10-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
15885 // CHECK10-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
15886 // CHECK10-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
15887 // CHECK10:       omp_offload.failed15:
15888 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i64 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
15889 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
15890 // CHECK10:       omp_offload.cont16:
15891 // CHECK10-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
15892 // CHECK10-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
15893 // CHECK10-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
15894 // CHECK10-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
15895 // CHECK10-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
15896 // CHECK10-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
15897 // CHECK10-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
15898 // CHECK10-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
15899 // CHECK10-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 8
15900 // CHECK10-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 8
15901 // CHECK10-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 8
15902 // CHECK10-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
15903 // CHECK10-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
15904 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
15905 // CHECK10-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
15906 // CHECK10-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
15907 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
15908 // CHECK10-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
15909 // CHECK10-NEXT:    store i8* null, i8** [[TMP77]], align 8
15910 // CHECK10-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
15911 // CHECK10-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
15912 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
15913 // CHECK10-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
15914 // CHECK10-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
15915 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
15916 // CHECK10-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
15917 // CHECK10-NEXT:    store i8* null, i8** [[TMP82]], align 8
15918 // CHECK10-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
15919 // CHECK10-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
15920 // CHECK10-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 8
15921 // CHECK10-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
15922 // CHECK10-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
15923 // CHECK10-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 8
15924 // CHECK10-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
15925 // CHECK10-NEXT:    store i8* null, i8** [[TMP87]], align 8
15926 // CHECK10-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
15927 // CHECK10-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
15928 // CHECK10-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 8
15929 // CHECK10-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
15930 // CHECK10-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
15931 // CHECK10-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 8
15932 // CHECK10-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
15933 // CHECK10-NEXT:    store i8* null, i8** [[TMP92]], align 8
15934 // CHECK10-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
15935 // CHECK10-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
15936 // CHECK10-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 8
15937 // CHECK10-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
15938 // CHECK10-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
15939 // CHECK10-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 8
15940 // CHECK10-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
15941 // CHECK10-NEXT:    store i8* null, i8** [[TMP97]], align 8
15942 // CHECK10-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
15943 // CHECK10-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
15944 // CHECK10-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
15945 // CHECK10-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
15946 // CHECK10-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
15947 // CHECK10-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
15948 // CHECK10-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
15949 // CHECK10-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
15950 // CHECK10-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
15951 // CHECK10-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
15952 // CHECK10-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
15953 // CHECK10-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
15954 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
15955 // CHECK10-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
15956 // CHECK10-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
15957 // CHECK10-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
15958 // CHECK10:       omp_offload.failed30:
15959 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i64 [[TMP67]], i64 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
15960 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
15961 // CHECK10:       omp_offload.cont31:
15962 // CHECK10-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
15963 // CHECK10-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
15964 // CHECK10-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
15965 // CHECK10-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
15966 // CHECK10-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 8
15967 // CHECK10-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 8
15968 // CHECK10-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 8
15969 // CHECK10-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
15970 // CHECK10-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
15971 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
15972 // CHECK10-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
15973 // CHECK10-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
15974 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
15975 // CHECK10-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
15976 // CHECK10-NEXT:    store i8* null, i8** [[TMP115]], align 8
15977 // CHECK10-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
15978 // CHECK10-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
15979 // CHECK10-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 8
15980 // CHECK10-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
15981 // CHECK10-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
15982 // CHECK10-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 8
15983 // CHECK10-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
15984 // CHECK10-NEXT:    store i8* null, i8** [[TMP120]], align 8
15985 // CHECK10-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
15986 // CHECK10-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
15987 // CHECK10-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 8
15988 // CHECK10-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
15989 // CHECK10-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
15990 // CHECK10-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 8
15991 // CHECK10-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
15992 // CHECK10-NEXT:    store i8* null, i8** [[TMP125]], align 8
15993 // CHECK10-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
15994 // CHECK10-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
15995 // CHECK10-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 8
15996 // CHECK10-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
15997 // CHECK10-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
15998 // CHECK10-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 8
15999 // CHECK10-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
16000 // CHECK10-NEXT:    store i8* null, i8** [[TMP130]], align 8
16001 // CHECK10-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
16002 // CHECK10-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
16003 // CHECK10-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
16004 // CHECK10-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
16005 // CHECK10-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
16006 // CHECK10-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
16007 // CHECK10-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
16008 // CHECK10-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
16009 // CHECK10-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
16010 // CHECK10-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
16011 // CHECK10-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
16012 // CHECK10-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
16013 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
16014 // CHECK10-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16015 // CHECK10-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
16016 // CHECK10-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
16017 // CHECK10:       omp_offload.failed44:
16018 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i64 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
16019 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
16020 // CHECK10:       omp_offload.cont45:
16021 // CHECK10-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
16022 // CHECK10-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
16023 // CHECK10-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
16024 // CHECK10-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
16025 // CHECK10-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
16026 // CHECK10-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
16027 // CHECK10-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
16028 // CHECK10-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
16029 // CHECK10-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 8
16030 // CHECK10-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 8
16031 // CHECK10-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 8
16032 // CHECK10-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
16033 // CHECK10-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
16034 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
16035 // CHECK10-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
16036 // CHECK10-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
16037 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
16038 // CHECK10-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
16039 // CHECK10-NEXT:    store i8* null, i8** [[TMP150]], align 8
16040 // CHECK10-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
16041 // CHECK10-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
16042 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
16043 // CHECK10-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
16044 // CHECK10-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
16045 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
16046 // CHECK10-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
16047 // CHECK10-NEXT:    store i8* null, i8** [[TMP155]], align 8
16048 // CHECK10-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
16049 // CHECK10-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
16050 // CHECK10-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 8
16051 // CHECK10-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
16052 // CHECK10-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
16053 // CHECK10-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 8
16054 // CHECK10-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
16055 // CHECK10-NEXT:    store i8* null, i8** [[TMP160]], align 8
16056 // CHECK10-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
16057 // CHECK10-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
16058 // CHECK10-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 8
16059 // CHECK10-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
16060 // CHECK10-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
16061 // CHECK10-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 8
16062 // CHECK10-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
16063 // CHECK10-NEXT:    store i8* null, i8** [[TMP165]], align 8
16064 // CHECK10-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
16065 // CHECK10-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
16066 // CHECK10-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 8
16067 // CHECK10-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
16068 // CHECK10-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
16069 // CHECK10-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 8
16070 // CHECK10-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
16071 // CHECK10-NEXT:    store i8* null, i8** [[TMP170]], align 8
16072 // CHECK10-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
16073 // CHECK10-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
16074 // CHECK10-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
16075 // CHECK10-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
16076 // CHECK10-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
16077 // CHECK10-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
16078 // CHECK10-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
16079 // CHECK10-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
16080 // CHECK10-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
16081 // CHECK10-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
16082 // CHECK10-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
16083 // CHECK10-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
16084 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
16085 // CHECK10-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16086 // CHECK10-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
16087 // CHECK10-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
16088 // CHECK10:       omp_offload.failed60:
16089 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i64 [[TMP140]], i64 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
16090 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
16091 // CHECK10:       omp_offload.cont61:
16092 // CHECK10-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
16093 // CHECK10-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
16094 // CHECK10-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
16095 // CHECK10-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
16096 // CHECK10-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 8
16097 // CHECK10-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 8
16098 // CHECK10-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 8
16099 // CHECK10-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
16100 // CHECK10-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
16101 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
16102 // CHECK10-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
16103 // CHECK10-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
16104 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
16105 // CHECK10-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
16106 // CHECK10-NEXT:    store i8* null, i8** [[TMP188]], align 8
16107 // CHECK10-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
16108 // CHECK10-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
16109 // CHECK10-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 8
16110 // CHECK10-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
16111 // CHECK10-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
16112 // CHECK10-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 8
16113 // CHECK10-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
16114 // CHECK10-NEXT:    store i8* null, i8** [[TMP193]], align 8
16115 // CHECK10-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
16116 // CHECK10-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
16117 // CHECK10-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 8
16118 // CHECK10-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
16119 // CHECK10-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
16120 // CHECK10-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 8
16121 // CHECK10-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
16122 // CHECK10-NEXT:    store i8* null, i8** [[TMP198]], align 8
16123 // CHECK10-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
16124 // CHECK10-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
16125 // CHECK10-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 8
16126 // CHECK10-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
16127 // CHECK10-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
16128 // CHECK10-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 8
16129 // CHECK10-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
16130 // CHECK10-NEXT:    store i8* null, i8** [[TMP203]], align 8
16131 // CHECK10-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
16132 // CHECK10-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
16133 // CHECK10-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
16134 // CHECK10-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
16135 // CHECK10-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
16136 // CHECK10-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
16137 // CHECK10-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
16138 // CHECK10-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
16139 // CHECK10-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
16140 // CHECK10-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
16141 // CHECK10-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
16142 // CHECK10-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
16143 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
16144 // CHECK10-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16145 // CHECK10-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
16146 // CHECK10-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
16147 // CHECK10:       omp_offload.failed74:
16148 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i64 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
16149 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
16150 // CHECK10:       omp_offload.cont75:
16151 // CHECK10-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
16152 // CHECK10-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
16153 // CHECK10-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
16154 // CHECK10-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
16155 // CHECK10-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
16156 // CHECK10-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
16157 // CHECK10-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
16158 // CHECK10-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
16159 // CHECK10-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 8
16160 // CHECK10-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 8
16161 // CHECK10-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 8
16162 // CHECK10-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
16163 // CHECK10-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
16164 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
16165 // CHECK10-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
16166 // CHECK10-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
16167 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
16168 // CHECK10-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
16169 // CHECK10-NEXT:    store i8* null, i8** [[TMP223]], align 8
16170 // CHECK10-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
16171 // CHECK10-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
16172 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
16173 // CHECK10-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
16174 // CHECK10-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
16175 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
16176 // CHECK10-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
16177 // CHECK10-NEXT:    store i8* null, i8** [[TMP228]], align 8
16178 // CHECK10-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
16179 // CHECK10-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
16180 // CHECK10-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 8
16181 // CHECK10-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
16182 // CHECK10-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
16183 // CHECK10-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 8
16184 // CHECK10-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
16185 // CHECK10-NEXT:    store i8* null, i8** [[TMP233]], align 8
16186 // CHECK10-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
16187 // CHECK10-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
16188 // CHECK10-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 8
16189 // CHECK10-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
16190 // CHECK10-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
16191 // CHECK10-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 8
16192 // CHECK10-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
16193 // CHECK10-NEXT:    store i8* null, i8** [[TMP238]], align 8
16194 // CHECK10-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
16195 // CHECK10-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
16196 // CHECK10-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 8
16197 // CHECK10-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
16198 // CHECK10-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
16199 // CHECK10-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 8
16200 // CHECK10-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
16201 // CHECK10-NEXT:    store i8* null, i8** [[TMP243]], align 8
16202 // CHECK10-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
16203 // CHECK10-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
16204 // CHECK10-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
16205 // CHECK10-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
16206 // CHECK10-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
16207 // CHECK10-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
16208 // CHECK10-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
16209 // CHECK10-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
16210 // CHECK10-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
16211 // CHECK10-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
16212 // CHECK10-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
16213 // CHECK10-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
16214 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
16215 // CHECK10-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16216 // CHECK10-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
16217 // CHECK10-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
16218 // CHECK10:       omp_offload.failed90:
16219 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i64 [[TMP213]], i64 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
16220 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
16221 // CHECK10:       omp_offload.cont91:
16222 // CHECK10-NEXT:    ret i32 0
16223 //
16224 //
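// Target entry point for the tmain<int>() region at source line 42: it reloads the
// firstprivate 'n' from its i64 argument and hands n, a, b and c to
// __kmpc_fork_teams, which runs the teams-level outlined function .omp_outlined..26.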
16225 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
16226 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
16227 // CHECK10-NEXT:  entry:
16228 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
16229 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
16230 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
16231 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
16232 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
16233 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
16234 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
16235 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
16236 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
16237 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
16238 // CHECK10-NEXT:    ret void
16239 //
16240 //
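// Teams-level outlined function for the line-42 region: it computes the loop trip
// count from 'n', initializes the distribute loop with __kmpc_for_static_init_4
// (schedule id 92), clamps the upper bound, and forks the enclosed parallel region
// via __kmpc_fork_call, passing the distribute chunk bounds on to .omp_outlined..27.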
16241 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..26
16242 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16243 // CHECK10-NEXT:  entry:
16244 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16245 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16246 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16247 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16248 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16249 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16250 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16251 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16252 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16253 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16254 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16255 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16256 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16257 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16258 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16259 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
16260 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16261 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16262 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16263 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16264 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16265 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16266 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16267 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16268 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16269 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16270 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16271 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16272 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16273 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16274 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16275 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16276 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16277 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16278 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16279 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16280 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16281 // CHECK10:       omp.precond.then:
16282 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16283 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16284 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
16285 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16286 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16287 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16288 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
16289 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16290 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16291 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16292 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
16293 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16294 // CHECK10:       cond.true:
16295 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16296 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16297 // CHECK10:       cond.false:
16298 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16299 // CHECK10-NEXT:    br label [[COND_END]]
16300 // CHECK10:       cond.end:
16301 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
16302 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16303 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16304 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
16305 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16306 // CHECK10:       omp.inner.for.cond:
16307 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16308 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
16309 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
16310 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16311 // CHECK10:       omp.inner.for.body:
16312 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !62
16313 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
16314 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
16315 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
16316 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !62
16317 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16318 // CHECK10:       omp.inner.for.inc:
16319 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16320 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !62
16321 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
16322 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16323 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
16324 // CHECK10:       omp.inner.for.end:
16325 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16326 // CHECK10:       omp.loop.exit:
16327 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16328 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
16329 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
16330 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16331 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
16332 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16333 // CHECK10:       .omp.final.then:
16334 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16335 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
16336 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16337 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
16338 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
16339 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
16340 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16341 // CHECK10:       .omp.final.done:
16342 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16343 // CHECK10:       omp.precond.end:
16344 // CHECK10-NEXT:    ret void
16345 //
16346 //
16347 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..27
16348 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16349 // CHECK10-NEXT:  entry:
16350 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16351 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16352 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
16353 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
16354 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16355 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16356 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16357 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16358 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16359 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16360 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16361 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16362 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16363 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16364 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16365 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16366 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16367 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
16368 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16369 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16370 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16371 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16372 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16373 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16374 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16375 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16376 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16377 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16378 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16379 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16380 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16381 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16382 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16383 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16384 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16385 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16386 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16387 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16388 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16389 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16390 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16391 // CHECK10:       omp.precond.then:
16392 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16393 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16394 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
16395 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16396 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
16397 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16398 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
16399 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
16400 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
16401 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16402 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16403 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16404 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16405 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16406 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16407 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16408 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16409 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16410 // CHECK10:       cond.true:
16411 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16412 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16413 // CHECK10:       cond.false:
16414 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16415 // CHECK10-NEXT:    br label [[COND_END]]
16416 // CHECK10:       cond.end:
16417 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16418 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16419 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16420 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16421 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16422 // CHECK10:       omp.inner.for.cond:
16423 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16424 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !65
16425 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
16426 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16427 // CHECK10:       omp.inner.for.body:
16428 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16429 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
16430 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16431 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !65
16432 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !65
16433 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
16434 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
16435 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
16436 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !65
16437 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !65
16438 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
16439 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
16440 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
16441 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !65
16442 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
16443 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !65
16444 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
16445 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
16446 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
16447 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !65
16448 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16449 // CHECK10:       omp.body.continue:
16450 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16451 // CHECK10:       omp.inner.for.inc:
16452 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16453 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
16454 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16455 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
16456 // CHECK10:       omp.inner.for.end:
16457 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16458 // CHECK10:       omp.loop.exit:
16459 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16460 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
16461 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
16462 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16463 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
16464 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16465 // CHECK10:       .omp.final.then:
16466 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16467 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
16468 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16469 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
16470 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
16471 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
16472 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16473 // CHECK10:       .omp.final.done:
16474 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16475 // CHECK10:       omp.precond.end:
16476 // CHECK10-NEXT:    ret void
16477 //
16478 //
16479 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
16480 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
16481 // CHECK10-NEXT:  entry:
16482 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
16483 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
16484 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
16485 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
16486 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
16487 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
16488 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
16489 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
16490 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
16491 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
16492 // CHECK10-NEXT:    ret void
16493 //
16494 //
16495 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..30
16496 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16497 // CHECK10-NEXT:  entry:
16498 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16499 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16500 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16501 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16502 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16503 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16504 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16505 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16506 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16507 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16508 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16509 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16510 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16511 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16512 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16513 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
16514 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16515 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16516 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16517 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16518 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16519 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16520 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16521 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16522 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16523 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16524 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16525 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16526 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16527 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16528 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16529 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16530 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16531 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16532 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16533 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16534 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16535 // CHECK10:       omp.precond.then:
16536 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16537 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16538 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
16539 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16540 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16541 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16542 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
16543 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16544 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16545 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16546 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
16547 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16548 // CHECK10:       cond.true:
16549 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16550 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16551 // CHECK10:       cond.false:
16552 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16553 // CHECK10-NEXT:    br label [[COND_END]]
16554 // CHECK10:       cond.end:
16555 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
16556 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16557 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16558 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
16559 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16560 // CHECK10:       omp.inner.for.cond:
16561 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16562 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
16563 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
16564 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16565 // CHECK10:       omp.inner.for.body:
16566 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !68
16567 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
16568 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
16569 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
16570 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !68
16571 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16572 // CHECK10:       omp.inner.for.inc:
16573 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16574 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !68
16575 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
16576 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16577 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
16578 // CHECK10:       omp.inner.for.end:
16579 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16580 // CHECK10:       omp.loop.exit:
16581 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16582 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
16583 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
16584 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16585 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
16586 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16587 // CHECK10:       .omp.final.then:
16588 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16589 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
16590 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16591 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
16592 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
16593 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
16594 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16595 // CHECK10:       .omp.final.done:
16596 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16597 // CHECK10:       omp.precond.end:
16598 // CHECK10-NEXT:    ret void
16599 //
16600 //
16601 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..31
16602 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16603 // CHECK10-NEXT:  entry:
16604 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16605 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16606 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
16607 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
16608 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16609 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16610 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16611 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16612 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16613 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16614 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16615 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16616 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16617 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16618 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16619 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16620 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16621 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
16622 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16623 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16624 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16625 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16626 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16627 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16628 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16629 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16630 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16631 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16632 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16633 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16634 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16635 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16636 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16637 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16638 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16639 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16640 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16641 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16642 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16643 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16644 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16645 // CHECK10:       omp.precond.then:
16646 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16647 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16648 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
16649 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16650 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
16651 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16652 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
16653 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
16654 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
16655 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16656 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16657 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16658 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16659 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16660 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16661 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16662 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16663 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16664 // CHECK10:       cond.true:
16665 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16666 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16667 // CHECK10:       cond.false:
16668 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16669 // CHECK10-NEXT:    br label [[COND_END]]
16670 // CHECK10:       cond.end:
16671 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16672 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16673 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16674 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16675 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16676 // CHECK10:       omp.inner.for.cond:
16677 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16678 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !71
16679 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
16680 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16681 // CHECK10:       omp.inner.for.body:
16682 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16683 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
16684 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16685 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !71
16686 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !71
16687 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
16688 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
16689 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
16690 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !71
16691 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !71
16692 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
16693 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
16694 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
16695 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !71
16696 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
16697 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !71
16698 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
16699 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
16700 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
16701 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !71
16702 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16703 // CHECK10:       omp.body.continue:
16704 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16705 // CHECK10:       omp.inner.for.inc:
16706 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16707 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
16708 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16709 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
16710 // CHECK10:       omp.inner.for.end:
16711 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16712 // CHECK10:       omp.loop.exit:
16713 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16714 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
16715 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
16716 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16717 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
16718 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16719 // CHECK10:       .omp.final.then:
16720 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16721 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
16722 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16723 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
16724 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
16725 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
16726 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16727 // CHECK10:       .omp.final.done:
16728 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16729 // CHECK10:       omp.precond.end:
16730 // CHECK10-NEXT:    ret void
16731 //
16732 //
16733 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
16734 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
16735 // CHECK10-NEXT:  entry:
16736 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
16737 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
16738 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
16739 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
16740 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
16741 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
16742 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
16743 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
16744 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
16745 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
16746 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
16747 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
16748 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
16749 // CHECK10-NEXT:    ret void
16750 //
16751 //
16752 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..34
16753 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16754 // CHECK10-NEXT:  entry:
16755 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16756 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16757 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
16758 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16759 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16760 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16761 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16762 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16763 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16764 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16765 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16766 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16767 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16768 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16769 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16770 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16771 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
16772 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16773 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16774 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
16775 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16776 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16777 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16778 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16779 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
16780 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16781 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16782 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16783 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16784 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
16785 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
16786 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16787 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
16788 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16789 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16790 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16791 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16792 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16793 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
16794 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16795 // CHECK10:       omp.precond.then:
16796 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16797 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16798 // CHECK10-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
16799 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16800 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16801 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
16802 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16803 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16804 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
16805 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16806 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16807 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16808 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16809 // CHECK10:       cond.true:
16810 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16811 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16812 // CHECK10:       cond.false:
16813 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16814 // CHECK10-NEXT:    br label [[COND_END]]
16815 // CHECK10:       cond.end:
16816 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16817 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16818 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16819 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16820 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16821 // CHECK10:       omp.inner.for.cond:
16822 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16823 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
16824 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
16825 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
16826 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16827 // CHECK10:       omp.inner.for.body:
16828 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16829 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
16830 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16831 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
16832 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !74
16833 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16834 // CHECK10:       omp.inner.for.inc:
16835 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16836 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
16837 // CHECK10-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
16838 // CHECK10-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16839 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16840 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
16841 // CHECK10-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
16842 // CHECK10-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16843 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16844 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
16845 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
16846 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16847 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16848 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
16849 // CHECK10-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
16850 // CHECK10-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
16851 // CHECK10:       cond.true10:
16852 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
16853 // CHECK10-NEXT:    br label [[COND_END12:%.*]]
16854 // CHECK10:       cond.false11:
16855 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16856 // CHECK10-NEXT:    br label [[COND_END12]]
16857 // CHECK10:       cond.end12:
16858 // CHECK10-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
16859 // CHECK10-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16860 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16861 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16862 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
16863 // CHECK10:       omp.inner.for.end:
16864 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16865 // CHECK10:       omp.loop.exit:
16866 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16867 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
16868 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
16869 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16870 // CHECK10-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
16871 // CHECK10-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16872 // CHECK10:       .omp.final.then:
16873 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16874 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
16875 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
16876 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
16877 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
16878 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
16879 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16880 // CHECK10:       .omp.final.done:
16881 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16882 // CHECK10:       omp.precond.end:
16883 // CHECK10-NEXT:    ret void
16884 //
16885 //
16886 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..35
16887 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16888 // CHECK10-NEXT:  entry:
16889 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16890 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16891 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
16892 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
16893 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16894 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16895 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16896 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16897 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16898 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16899 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16900 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16901 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16902 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16903 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16904 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16905 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16906 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
16907 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16908 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16909 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16910 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16911 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16912 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16913 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16914 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16915 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16916 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16917 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16918 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16919 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16920 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16921 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16922 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16923 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16924 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16925 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16926 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16927 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16928 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16929 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16930 // CHECK10:       omp.precond.then:
16931 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16932 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16933 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
16934 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16935 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
16936 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16937 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
16938 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
16939 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
16940 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16941 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16942 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16943 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16944 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16945 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16946 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16947 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16948 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16949 // CHECK10:       cond.true:
16950 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16951 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16952 // CHECK10:       cond.false:
16953 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16954 // CHECK10-NEXT:    br label [[COND_END]]
16955 // CHECK10:       cond.end:
16956 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16957 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16958 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16959 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16960 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16961 // CHECK10:       omp.inner.for.cond:
16962 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16963 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !77
16964 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
16965 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16966 // CHECK10:       omp.inner.for.body:
16967 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16968 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
16969 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16970 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !77
16971 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !77
16972 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
16973 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
16974 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
16975 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !77
16976 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !77
16977 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
16978 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
16979 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
16980 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !77
16981 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
16982 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !77
16983 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
16984 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
16985 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
16986 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !77
16987 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16988 // CHECK10:       omp.body.continue:
16989 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16990 // CHECK10:       omp.inner.for.inc:
16991 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16992 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
16993 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16994 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP78:![0-9]+]]
16995 // CHECK10:       omp.inner.for.end:
16996 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16997 // CHECK10:       omp.loop.exit:
16998 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16999 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
17000 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
17001 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17002 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
17003 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17004 // CHECK10:       .omp.final.then:
17005 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17006 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
17007 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
17008 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
17009 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
17010 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
17011 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17012 // CHECK10:       .omp.final.done:
17013 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17014 // CHECK10:       omp.precond.end:
17015 // CHECK10-NEXT:    ret void
17016 //
17017 //
17018 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
17019 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17020 // CHECK10-NEXT:  entry:
17021 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17022 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17023 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17024 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17025 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17026 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17027 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17028 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17029 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17030 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17031 // CHECK10-NEXT:    ret void
17032 //
17033 //
17034 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..38
17035 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17036 // CHECK10-NEXT:  entry:
17037 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17038 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17039 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17040 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17041 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17042 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17043 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17044 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17045 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17046 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17047 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17048 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17049 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17050 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17051 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17052 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
17053 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17054 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17055 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17056 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17057 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17058 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17059 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17060 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17061 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17062 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17063 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17064 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17065 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17066 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17067 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17068 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17069 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17070 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17071 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17072 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17073 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17074 // CHECK10:       omp.precond.then:
17075 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17076 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17077 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
17078 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17079 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17080 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17081 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17082 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17083 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17084 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17085 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
17086 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17087 // CHECK10:       cond.true:
17088 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17089 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17090 // CHECK10:       cond.false:
17091 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17092 // CHECK10-NEXT:    br label [[COND_END]]
17093 // CHECK10:       cond.end:
17094 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
17095 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17096 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17097 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
17098 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17099 // CHECK10:       omp.inner.for.cond:
17100 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
17101 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
17102 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17103 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17104 // CHECK10:       omp.inner.for.body:
17105 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !80
17106 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
17107 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
17108 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17109 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !80
17110 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17111 // CHECK10:       omp.inner.for.inc:
17112 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
17113 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !80
17114 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
17115 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
17116 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP81:![0-9]+]]
17117 // CHECK10:       omp.inner.for.end:
17118 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17119 // CHECK10:       omp.loop.exit:
17120 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17121 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
17122 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
17123 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17124 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
17125 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17126 // CHECK10:       .omp.final.then:
17127 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17128 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
17129 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
17130 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
17131 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
17132 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
17133 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17134 // CHECK10:       .omp.final.done:
17135 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17136 // CHECK10:       omp.precond.end:
17137 // CHECK10-NEXT:    ret void
17138 //
17139 //
17140 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..39
17141 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17142 // CHECK10-NEXT:  entry:
17143 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17144 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17145 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17146 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17147 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17148 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17149 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17150 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17151 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17152 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17153 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17154 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17155 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17156 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17157 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17158 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17159 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17160 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17161 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17162 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17163 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17164 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17165 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17166 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17167 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17168 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17169 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17170 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17171 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17172 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17173 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17174 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17175 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17176 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17177 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17178 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17179 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17180 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17181 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17182 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17183 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17184 // CHECK10:       omp.precond.then:
17185 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17186 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17187 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17188 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17189 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
17190 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17191 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
17192 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
17193 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
17194 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17195 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17196 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17197 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
17198 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17199 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17200 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17201 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
17202 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17203 // CHECK10:       cond.true:
17204 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17205 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17206 // CHECK10:       cond.false:
17207 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17208 // CHECK10-NEXT:    br label [[COND_END]]
17209 // CHECK10:       cond.end:
17210 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
17211 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17212 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17213 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
17214 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17215 // CHECK10:       omp.inner.for.cond:
17216 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17217 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !83
17218 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
17219 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17220 // CHECK10:       omp.inner.for.body:
17221 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17222 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
17223 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17224 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !83
17225 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !83
17226 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
17227 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
17228 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
17229 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !83
17230 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !83
17231 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
17232 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
17233 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
17234 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !83
17235 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
17236 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !83
17237 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
17238 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
17239 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
17240 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !83
17241 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17242 // CHECK10:       omp.body.continue:
17243 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17244 // CHECK10:       omp.inner.for.inc:
17245 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17246 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
17247 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17248 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP84:![0-9]+]]
17249 // CHECK10:       omp.inner.for.end:
17250 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17251 // CHECK10:       omp.loop.exit:
17252 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17253 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
17254 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
17255 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17256 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
17257 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17258 // CHECK10:       .omp.final.then:
17259 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17260 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
17261 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
17262 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
17263 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
17264 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
17265 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17266 // CHECK10:       .omp.final.done:
17267 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17268 // CHECK10:       omp.precond.end:
17269 // CHECK10-NEXT:    ret void
17270 //
17271 //
17272 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
17273 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17274 // CHECK10-NEXT:  entry:
17275 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
17276 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17277 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17278 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17279 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17280 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
17281 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17282 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17283 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17284 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17285 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
17286 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17287 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17288 // CHECK10-NEXT:    ret void
17289 //
17290 //
17291 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..42
17292 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17293 // CHECK10-NEXT:  entry:
17294 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17295 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17296 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
17297 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17298 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17299 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17300 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17301 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17302 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17303 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17304 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17305 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17306 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17307 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17308 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17309 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17310 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17311 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17312 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
17313 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17314 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17315 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
17316 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17317 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17318 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17319 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17320 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
17321 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17322 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17323 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17324 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17325 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
17326 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
17327 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
17328 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17329 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17330 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
17331 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17332 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17333 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17334 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17335 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17336 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
17337 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17338 // CHECK10:       omp.precond.then:
17339 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17340 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17341 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
17342 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17343 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17344 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17345 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
17346 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17347 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17348 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17349 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
17350 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17351 // CHECK10:       cond.true:
17352 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17353 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17354 // CHECK10:       cond.false:
17355 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17356 // CHECK10-NEXT:    br label [[COND_END]]
17357 // CHECK10:       cond.end:
17358 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
17359 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17360 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17361 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
17362 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17363 // CHECK10:       omp.inner.for.cond:
17364 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
17365 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
17366 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
17367 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17368 // CHECK10:       omp.inner.for.body:
17369 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !86
17370 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17371 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
17372 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
17373 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !86
17374 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
17375 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !86
17376 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !86
17377 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !86
17378 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17379 // CHECK10:       omp.inner.for.inc:
17380 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
17381 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !86
17382 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
17383 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
17384 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP87:![0-9]+]]
17385 // CHECK10:       omp.inner.for.end:
17386 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17387 // CHECK10:       omp.loop.exit:
17388 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17389 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
17390 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
17391 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17392 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
17393 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17394 // CHECK10:       .omp.final.then:
17395 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17396 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
17397 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
17398 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
17399 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
17400 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
17401 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17402 // CHECK10:       .omp.final.done:
17403 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17404 // CHECK10:       omp.precond.end:
17405 // CHECK10-NEXT:    ret void
17406 //
17407 //
17408 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..43
17409 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
17410 // CHECK10-NEXT:  entry:
17411 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17412 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17413 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17414 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17415 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17416 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17417 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17418 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17419 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
17420 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17421 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17422 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17423 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17424 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17425 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17426 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17427 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17428 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17429 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
17430 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17431 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17432 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17433 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17434 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17435 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17436 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17437 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17438 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
17439 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17440 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17441 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17442 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17443 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
17444 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17445 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17446 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17447 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17448 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17449 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17450 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17451 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17452 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17453 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17454 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17455 // CHECK10:       omp.precond.then:
17456 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17457 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17458 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17459 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17460 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
17461 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17462 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
17463 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
17464 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
17465 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17466 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17467 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
17468 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17469 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
17470 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
17471 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
17472 // CHECK10:       omp.dispatch.cond:
17473 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17474 // CHECK10-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17475 // CHECK10-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
17476 // CHECK10-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
17477 // CHECK10-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17478 // CHECK10:       cond.true:
17479 // CHECK10-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17480 // CHECK10-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
17481 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17482 // CHECK10:       cond.false:
17483 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17484 // CHECK10-NEXT:    br label [[COND_END]]
17485 // CHECK10:       cond.end:
17486 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
17487 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17488 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17489 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
17490 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17491 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17492 // CHECK10-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
17493 // CHECK10-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
17494 // CHECK10:       omp.dispatch.body:
17495 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17496 // CHECK10:       omp.inner.for.cond:
17497 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17498 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !89
17499 // CHECK10-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
17500 // CHECK10-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17501 // CHECK10:       omp.inner.for.body:
17502 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17503 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
17504 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17505 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !89
17506 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !89
17507 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
17508 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
17509 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
17510 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !89
17511 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !89
17512 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
17513 // CHECK10-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
17514 // CHECK10-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM12]]
17515 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !89
17516 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
17517 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !89
17518 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
17519 // CHECK10-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
17520 // CHECK10-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM15]]
17521 // CHECK10-NEXT:    store i32 [[ADD14]], i32* [[ARRAYIDX16]], align 4, !llvm.access.group !89
17522 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17523 // CHECK10:       omp.body.continue:
17524 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17525 // CHECK10:       omp.inner.for.inc:
17526 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17527 // CHECK10-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
17528 // CHECK10-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17529 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP90:![0-9]+]]
17530 // CHECK10:       omp.inner.for.end:
17531 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
17532 // CHECK10:       omp.dispatch.inc:
17533 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17534 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17535 // CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
17536 // CHECK10-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
17537 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17538 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17539 // CHECK10-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
17540 // CHECK10-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
17541 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
17542 // CHECK10:       omp.dispatch.end:
17543 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17544 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
17545 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
17546 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17547 // CHECK10-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
17548 // CHECK10-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17549 // CHECK10:       .omp.final.then:
17550 // CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17551 // CHECK10-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
17552 // CHECK10-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
17553 // CHECK10-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
17554 // CHECK10-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
17555 // CHECK10-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
17556 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17557 // CHECK10:       .omp.final.done:
17558 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17559 // CHECK10:       omp.precond.end:
17560 // CHECK10-NEXT:    ret void
17561 //
17562 //
17563 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
17564 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17565 // CHECK10-NEXT:  entry:
17566 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17567 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17568 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17569 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17570 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17571 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17572 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17573 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17574 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17575 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17576 // CHECK10-NEXT:    ret void
17577 //
17578 //
17579 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..46
17580 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17581 // CHECK10-NEXT:  entry:
17582 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17583 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17584 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17585 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17586 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17587 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17588 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17589 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17590 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17591 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17592 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17593 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17594 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17595 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17596 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17597 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
17598 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17599 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17600 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17601 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17602 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17603 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17604 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17605 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17606 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17607 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17608 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17609 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17610 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17611 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17612 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17613 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17614 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17615 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17616 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17617 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17618 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17619 // CHECK10:       omp.precond.then:
17620 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17621 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17622 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
17623 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17624 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17625 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17626 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17627 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17628 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17629 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17630 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
17631 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17632 // CHECK10:       cond.true:
17633 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17634 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17635 // CHECK10:       cond.false:
17636 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17637 // CHECK10-NEXT:    br label [[COND_END]]
17638 // CHECK10:       cond.end:
17639 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
17640 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17641 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17642 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
17643 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17644 // CHECK10:       omp.inner.for.cond:
17645 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
17646 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
17647 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17648 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17649 // CHECK10:       omp.inner.for.body:
17650 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !92
17651 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
17652 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
17653 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17654 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !92
17655 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17656 // CHECK10:       omp.inner.for.inc:
17657 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
17658 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !92
17659 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
17660 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
17661 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP93:![0-9]+]]
17662 // CHECK10:       omp.inner.for.end:
17663 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17664 // CHECK10:       omp.loop.exit:
17665 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17666 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
17667 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
17668 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17669 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
17670 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17671 // CHECK10:       .omp.final.then:
17672 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17673 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
17674 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
17675 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
17676 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
17677 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
17678 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17679 // CHECK10:       .omp.final.done:
17680 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17681 // CHECK10:       omp.precond.end:
17682 // CHECK10-NEXT:    ret void
17683 //
17684 //
17685 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..47
17686 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17687 // CHECK10-NEXT:  entry:
17688 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17689 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17690 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17691 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17692 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17693 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17694 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17695 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17696 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17697 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17698 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17699 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17700 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17701 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17702 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17703 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17704 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17705 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17706 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17707 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17708 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17709 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17710 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17711 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17712 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17713 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17714 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17715 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17716 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17717 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17718 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17719 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17720 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17721 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17722 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17723 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17724 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17725 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17726 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17727 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17728 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17729 // CHECK10:       omp.precond.then:
17730 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17731 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17732 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17733 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17734 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
17735 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17736 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
17737 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
17738 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
17739 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17740 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17741 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17742 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17743 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17744 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
17745 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
17746 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
17747 // CHECK10:       omp.dispatch.cond:
17748 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17749 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
17750 // CHECK10-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
17751 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
17752 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
17753 // CHECK10:       omp.dispatch.body:
17754 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17755 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
17756 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17757 // CHECK10:       omp.inner.for.cond:
17758 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17759 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !95
17760 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
17761 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17762 // CHECK10:       omp.inner.for.body:
17763 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17764 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
17765 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17766 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !95
17767 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !95
17768 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
17769 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
17770 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i64 [[IDXPROM]]
17771 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !95
17772 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !95
17773 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
17774 // CHECK10-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
17775 // CHECK10-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i64 [[IDXPROM6]]
17776 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !llvm.access.group !95
17777 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
17778 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !95
17779 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
17780 // CHECK10-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
17781 // CHECK10-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[IDXPROM9]]
17782 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !95
17783 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17784 // CHECK10:       omp.body.continue:
17785 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17786 // CHECK10:       omp.inner.for.inc:
17787 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17788 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
17789 // CHECK10-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17790 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP96:![0-9]+]]
17791 // CHECK10:       omp.inner.for.end:
17792 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
17793 // CHECK10:       omp.dispatch.inc:
17794 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
17795 // CHECK10:       omp.dispatch.end:
17796 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17797 // CHECK10-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
17798 // CHECK10-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17799 // CHECK10:       .omp.final.then:
17800 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17801 // CHECK10-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
17802 // CHECK10-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
17803 // CHECK10-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
17804 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
17805 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
17806 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17807 // CHECK10:       .omp.final.done:
17808 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17809 // CHECK10:       omp.precond.end:
17810 // CHECK10-NEXT:    ret void
17811 //
17812 //
17813 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
17814 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17815 // CHECK10-NEXT:  entry:
17816 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
17817 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17818 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17819 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17820 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17821 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
17822 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17823 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17824 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17825 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17826 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
17827 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17828 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17829 // CHECK10-NEXT:    ret void
17830 //
17831 //
17832 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..50
17833 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17834 // CHECK10-NEXT:  entry:
17835 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17836 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17837 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
17838 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17839 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17840 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17841 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17842 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17843 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17844 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17845 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17846 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17847 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17848 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17849 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17850 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17851 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17852 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17853 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
17854 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17855 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17856 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
17857 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17858 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17859 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17860 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17861 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
17862 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17863 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17864 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17865 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17866 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
17867 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
17868 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
17869 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17870 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17871 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
17872 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17873 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17874 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17875 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17876 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17877 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
17878 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17879 // CHECK10:       omp.precond.then:
17880 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17881 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17882 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
17883 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17884 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17885 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17886 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
17887 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17888 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17889 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17890 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
17891 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17892 // CHECK10:       cond.true:
17893 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17894 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17895 // CHECK10:       cond.false:
17896 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17897 // CHECK10-NEXT:    br label [[COND_END]]
17898 // CHECK10:       cond.end:
17899 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
17900 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17901 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17902 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
17903 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17904 // CHECK10:       omp.inner.for.cond:
17905 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
17906 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
17907 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
17908 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17909 // CHECK10:       omp.inner.for.body:
17910 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !98
17911 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17912 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
17913 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
17914 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !98
17915 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
17916 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !98
17917 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !98
17918 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !98
17919 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17920 // CHECK10:       omp.inner.for.inc:
17921 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
17922 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !98
17923 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
17924 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
17925 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP99:![0-9]+]]
17926 // CHECK10:       omp.inner.for.end:
17927 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17928 // CHECK10:       omp.loop.exit:
17929 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17930 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
17931 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
17932 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17933 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
17934 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17935 // CHECK10:       .omp.final.then:
17936 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17937 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
17938 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
17939 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
17940 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
17941 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
17942 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17943 // CHECK10:       .omp.final.done:
17944 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17945 // CHECK10:       omp.precond.end:
17946 // CHECK10-NEXT:    ret void
17947 //
17948 //
17949 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..51
17950 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
17951 // CHECK10-NEXT:  entry:
17952 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17953 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17954 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17955 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17956 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17957 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17958 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17959 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17960 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
17961 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17962 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17963 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17964 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17965 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17966 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17967 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17968 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17969 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17970 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
17971 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17972 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17973 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17974 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17975 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17976 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17977 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17978 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17979 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
17980 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17981 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17982 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17983 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17984 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
17985 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17986 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17987 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17988 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17989 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17990 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17991 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17992 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17993 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17994 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17995 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17996 // CHECK10:       omp.precond.then:
17997 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17998 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17999 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
18000 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
18001 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
18002 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
18003 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
18004 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
18005 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
18006 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18007 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18008 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
18009 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18010 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18011 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18012 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
18013 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
18014 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
18015 // CHECK10:       omp.dispatch.cond:
18016 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18017 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
18018 // CHECK10-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
18019 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
18020 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
18021 // CHECK10:       omp.dispatch.body:
18022 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18023 // CHECK10-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
18024 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18025 // CHECK10:       omp.inner.for.cond:
18026 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18027 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !101
18028 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
18029 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18030 // CHECK10:       omp.inner.for.body:
18031 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18032 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
18033 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18034 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !101
18035 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !101
18036 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
18037 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
18038 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i64 [[IDXPROM]]
18039 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !101
18040 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !101
18041 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
18042 // CHECK10-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
18043 // CHECK10-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i64 [[IDXPROM8]]
18044 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4, !llvm.access.group !101
18045 // CHECK10-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
18046 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !101
18047 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
18048 // CHECK10-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
18049 // CHECK10-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i64 [[IDXPROM11]]
18050 // CHECK10-NEXT:    store i32 [[ADD10]], i32* [[ARRAYIDX12]], align 4, !llvm.access.group !101
18051 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18052 // CHECK10:       omp.body.continue:
18053 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18054 // CHECK10:       omp.inner.for.inc:
18055 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18056 // CHECK10-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
18057 // CHECK10-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18058 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP102:![0-9]+]]
18059 // CHECK10:       omp.inner.for.end:
18060 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
18061 // CHECK10:       omp.dispatch.inc:
18062 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
18063 // CHECK10:       omp.dispatch.end:
18064 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18065 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
18066 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18067 // CHECK10:       .omp.final.then:
18068 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18069 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
18070 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
18071 // CHECK10-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
18072 // CHECK10-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
18073 // CHECK10-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
18074 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18075 // CHECK10:       .omp.final.done:
18076 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
18077 // CHECK10:       omp.precond.end:
18078 // CHECK10-NEXT:    ret void
18079 //
18080 //
18081 // CHECK10-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
18082 // CHECK10-SAME: () #[[ATTR4:[0-9]+]] {
18083 // CHECK10-NEXT:  entry:
18084 // CHECK10-NEXT:    call void @__tgt_register_requires(i64 1)
18085 // CHECK10-NEXT:    ret void
18086 //
18087 //
18088 // CHECK11-LABEL: define {{[^@]+}}@main
18089 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
18090 // CHECK11-NEXT:  entry:
18091 // CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
18092 // CHECK11-NEXT:    [[A:%.*]] = alloca double*, align 4
18093 // CHECK11-NEXT:    [[B:%.*]] = alloca double*, align 4
18094 // CHECK11-NEXT:    [[C:%.*]] = alloca double*, align 4
18095 // CHECK11-NEXT:    [[N:%.*]] = alloca i32, align 4
18096 // CHECK11-NEXT:    [[CH:%.*]] = alloca i32, align 4
18097 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
18098 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
18099 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
18100 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
18101 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18102 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18103 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18104 // CHECK11-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
18105 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
18106 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
18107 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
18108 // CHECK11-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
18109 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
18110 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
18111 // CHECK11-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
18112 // CHECK11-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
18113 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
18114 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
18115 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
18116 // CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
18117 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
18118 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
18119 // CHECK11-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
18120 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
18121 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
18122 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
18123 // CHECK11-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
18124 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
18125 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
18126 // CHECK11-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
18127 // CHECK11-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
18128 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
18129 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
18130 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
18131 // CHECK11-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
18132 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
18133 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
18134 // CHECK11-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
18135 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
18136 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
18137 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
18138 // CHECK11-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
18139 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
18140 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
18141 // CHECK11-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
18142 // CHECK11-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
18143 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
18144 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
18145 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
18146 // CHECK11-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
18147 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
18148 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
18149 // CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
18150 // CHECK11-NEXT:    store i32 10000, i32* [[N]], align 4
18151 // CHECK11-NEXT:    store i32 100, i32* [[CH]], align 4
18152 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
18153 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
18154 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
18155 // CHECK11-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 4
18156 // CHECK11-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 4
18157 // CHECK11-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 4
18158 // CHECK11-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
18159 // CHECK11-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
18160 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
18161 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
18162 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
18163 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
18164 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
18165 // CHECK11-NEXT:    store i8* null, i8** [[TMP9]], align 4
18166 // CHECK11-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
18167 // CHECK11-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
18168 // CHECK11-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 4
18169 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
18170 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
18171 // CHECK11-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 4
18172 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
18173 // CHECK11-NEXT:    store i8* null, i8** [[TMP14]], align 4
18174 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
18175 // CHECK11-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
18176 // CHECK11-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 4
18177 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
18178 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
18179 // CHECK11-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 4
18180 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
18181 // CHECK11-NEXT:    store i8* null, i8** [[TMP19]], align 4
18182 // CHECK11-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
18183 // CHECK11-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
18184 // CHECK11-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 4
18185 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
18186 // CHECK11-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
18187 // CHECK11-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 4
18188 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
18189 // CHECK11-NEXT:    store i8* null, i8** [[TMP24]], align 4
18190 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
18191 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
18192 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
18193 // CHECK11-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
18194 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18195 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
18196 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18197 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18198 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18199 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18200 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
18201 // CHECK11-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
18202 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
18203 // CHECK11-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18204 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
18205 // CHECK11-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
18206 // CHECK11:       omp_offload.failed:
18207 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i32 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
18208 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
18209 // CHECK11:       omp_offload.cont:
18210 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
18211 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
18212 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
18213 // CHECK11-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 4
18214 // CHECK11-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 4
18215 // CHECK11-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 4
18216 // CHECK11-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
18217 // CHECK11-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
18218 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
18219 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
18220 // CHECK11-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
18221 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
18222 // CHECK11-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
18223 // CHECK11-NEXT:    store i8* null, i8** [[TMP42]], align 4
18224 // CHECK11-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
18225 // CHECK11-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
18226 // CHECK11-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 4
18227 // CHECK11-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
18228 // CHECK11-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
18229 // CHECK11-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 4
18230 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
18231 // CHECK11-NEXT:    store i8* null, i8** [[TMP47]], align 4
18232 // CHECK11-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
18233 // CHECK11-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
18234 // CHECK11-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 4
18235 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
18236 // CHECK11-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
18237 // CHECK11-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 4
18238 // CHECK11-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
18239 // CHECK11-NEXT:    store i8* null, i8** [[TMP52]], align 4
18240 // CHECK11-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
18241 // CHECK11-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
18242 // CHECK11-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 4
18243 // CHECK11-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
18244 // CHECK11-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
18245 // CHECK11-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 4
18246 // CHECK11-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
18247 // CHECK11-NEXT:    store i8* null, i8** [[TMP57]], align 4
18248 // CHECK11-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
18249 // CHECK11-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
18250 // CHECK11-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
18251 // CHECK11-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
18252 // CHECK11-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
18253 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
18254 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
18255 // CHECK11-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
18256 // CHECK11-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
18257 // CHECK11-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
18258 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
18259 // CHECK11-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
18260 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
18261 // CHECK11-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18262 // CHECK11-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
18263 // CHECK11-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
18264 // CHECK11:       omp_offload.failed14:
18265 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i32 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
18266 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
18267 // CHECK11:       omp_offload.cont15:
18268 // CHECK11-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
18269 // CHECK11-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
18270 // CHECK11-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
18271 // CHECK11-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
18272 // CHECK11-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
18273 // CHECK11-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
18274 // CHECK11-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 4
18275 // CHECK11-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 4
18276 // CHECK11-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 4
18277 // CHECK11-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
18278 // CHECK11-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
18279 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
18280 // CHECK11-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
18281 // CHECK11-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
18282 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
18283 // CHECK11-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
18284 // CHECK11-NEXT:    store i8* null, i8** [[TMP77]], align 4
18285 // CHECK11-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
18286 // CHECK11-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
18287 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
18288 // CHECK11-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
18289 // CHECK11-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
18290 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
18291 // CHECK11-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
18292 // CHECK11-NEXT:    store i8* null, i8** [[TMP82]], align 4
18293 // CHECK11-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
18294 // CHECK11-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
18295 // CHECK11-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 4
18296 // CHECK11-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
18297 // CHECK11-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
18298 // CHECK11-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 4
18299 // CHECK11-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
18300 // CHECK11-NEXT:    store i8* null, i8** [[TMP87]], align 4
18301 // CHECK11-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
18302 // CHECK11-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
18303 // CHECK11-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 4
18304 // CHECK11-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
18305 // CHECK11-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
18306 // CHECK11-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 4
18307 // CHECK11-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
18308 // CHECK11-NEXT:    store i8* null, i8** [[TMP92]], align 4
18309 // CHECK11-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
18310 // CHECK11-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
18311 // CHECK11-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 4
18312 // CHECK11-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
18313 // CHECK11-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
18314 // CHECK11-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 4
18315 // CHECK11-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
18316 // CHECK11-NEXT:    store i8* null, i8** [[TMP97]], align 4
18317 // CHECK11-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
18318 // CHECK11-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
18319 // CHECK11-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
18320 // CHECK11-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
18321 // CHECK11-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
18322 // CHECK11-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
18323 // CHECK11-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
18324 // CHECK11-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
18325 // CHECK11-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
18326 // CHECK11-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
18327 // CHECK11-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
18328 // CHECK11-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
18329 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
18330 // CHECK11-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18331 // CHECK11-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
18332 // CHECK11-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
18333 // CHECK11:       omp_offload.failed27:
18334 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i32 [[TMP67]], i32 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
18335 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
18336 // CHECK11:       omp_offload.cont28:
18337 // CHECK11-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
18338 // CHECK11-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
18339 // CHECK11-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
18340 // CHECK11-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 4
18341 // CHECK11-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 4
18342 // CHECK11-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 4
18343 // CHECK11-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
18344 // CHECK11-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
18345 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
18346 // CHECK11-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
18347 // CHECK11-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
18348 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
18349 // CHECK11-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
18350 // CHECK11-NEXT:    store i8* null, i8** [[TMP115]], align 4
18351 // CHECK11-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
18352 // CHECK11-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
18353 // CHECK11-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 4
18354 // CHECK11-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
18355 // CHECK11-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
18356 // CHECK11-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 4
18357 // CHECK11-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
18358 // CHECK11-NEXT:    store i8* null, i8** [[TMP120]], align 4
18359 // CHECK11-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
18360 // CHECK11-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
18361 // CHECK11-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 4
18362 // CHECK11-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
18363 // CHECK11-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
18364 // CHECK11-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 4
18365 // CHECK11-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
18366 // CHECK11-NEXT:    store i8* null, i8** [[TMP125]], align 4
18367 // CHECK11-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
18368 // CHECK11-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
18369 // CHECK11-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 4
18370 // CHECK11-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
18371 // CHECK11-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
18372 // CHECK11-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 4
18373 // CHECK11-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
18374 // CHECK11-NEXT:    store i8* null, i8** [[TMP130]], align 4
18375 // CHECK11-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
18376 // CHECK11-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
18377 // CHECK11-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
18378 // CHECK11-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
18379 // CHECK11-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
18380 // CHECK11-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
18381 // CHECK11-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
18382 // CHECK11-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
18383 // CHECK11-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
18384 // CHECK11-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
18385 // CHECK11-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
18386 // CHECK11-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
18387 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
18388 // CHECK11-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18389 // CHECK11-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
18390 // CHECK11-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
18391 // CHECK11:       omp_offload.failed40:
18392 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i32 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
18393 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
18394 // CHECK11:       omp_offload.cont41:
18395 // CHECK11-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
18396 // CHECK11-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
18397 // CHECK11-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
18398 // CHECK11-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
18399 // CHECK11-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
18400 // CHECK11-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
18401 // CHECK11-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 4
18402 // CHECK11-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 4
18403 // CHECK11-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 4
18404 // CHECK11-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
18405 // CHECK11-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
18406 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
18407 // CHECK11-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
18408 // CHECK11-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
18409 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
18410 // CHECK11-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
18411 // CHECK11-NEXT:    store i8* null, i8** [[TMP150]], align 4
18412 // CHECK11-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
18413 // CHECK11-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
18414 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
18415 // CHECK11-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
18416 // CHECK11-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
18417 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
18418 // CHECK11-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
18419 // CHECK11-NEXT:    store i8* null, i8** [[TMP155]], align 4
18420 // CHECK11-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
18421 // CHECK11-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
18422 // CHECK11-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 4
18423 // CHECK11-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
18424 // CHECK11-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
18425 // CHECK11-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 4
18426 // CHECK11-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
18427 // CHECK11-NEXT:    store i8* null, i8** [[TMP160]], align 4
18428 // CHECK11-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
18429 // CHECK11-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
18430 // CHECK11-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 4
18431 // CHECK11-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
18432 // CHECK11-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
18433 // CHECK11-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 4
18434 // CHECK11-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
18435 // CHECK11-NEXT:    store i8* null, i8** [[TMP165]], align 4
18436 // CHECK11-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
18437 // CHECK11-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
18438 // CHECK11-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 4
18439 // CHECK11-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
18440 // CHECK11-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
18441 // CHECK11-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 4
18442 // CHECK11-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
18443 // CHECK11-NEXT:    store i8* null, i8** [[TMP170]], align 4
18444 // CHECK11-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
18445 // CHECK11-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
18446 // CHECK11-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
18447 // CHECK11-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
18448 // CHECK11-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
18449 // CHECK11-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
18450 // CHECK11-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
18451 // CHECK11-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
18452 // CHECK11-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
18453 // CHECK11-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
18454 // CHECK11-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
18455 // CHECK11-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
18456 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
18457 // CHECK11-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18458 // CHECK11-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
18459 // CHECK11-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
18460 // CHECK11:       omp_offload.failed54:
18461 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i32 [[TMP140]], i32 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
18462 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
18463 // CHECK11:       omp_offload.cont55:
18464 // CHECK11-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
18465 // CHECK11-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
18466 // CHECK11-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
18467 // CHECK11-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 4
18468 // CHECK11-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 4
18469 // CHECK11-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 4
18470 // CHECK11-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
18471 // CHECK11-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
18472 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
18473 // CHECK11-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
18474 // CHECK11-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
18475 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
18476 // CHECK11-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
18477 // CHECK11-NEXT:    store i8* null, i8** [[TMP188]], align 4
18478 // CHECK11-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
18479 // CHECK11-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
18480 // CHECK11-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 4
18481 // CHECK11-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
18482 // CHECK11-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
18483 // CHECK11-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 4
18484 // CHECK11-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
18485 // CHECK11-NEXT:    store i8* null, i8** [[TMP193]], align 4
18486 // CHECK11-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
18487 // CHECK11-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
18488 // CHECK11-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 4
18489 // CHECK11-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
18490 // CHECK11-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
18491 // CHECK11-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 4
18492 // CHECK11-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
18493 // CHECK11-NEXT:    store i8* null, i8** [[TMP198]], align 4
18494 // CHECK11-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
18495 // CHECK11-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
18496 // CHECK11-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 4
18497 // CHECK11-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
18498 // CHECK11-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
18499 // CHECK11-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 4
18500 // CHECK11-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
18501 // CHECK11-NEXT:    store i8* null, i8** [[TMP203]], align 4
18502 // CHECK11-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
18503 // CHECK11-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
18504 // CHECK11-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
18505 // CHECK11-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
18506 // CHECK11-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
18507 // CHECK11-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
18508 // CHECK11-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
18509 // CHECK11-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
18510 // CHECK11-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
18511 // CHECK11-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
18512 // CHECK11-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
18513 // CHECK11-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
18514 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
18515 // CHECK11-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18516 // CHECK11-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
18517 // CHECK11-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
18518 // CHECK11:       omp_offload.failed67:
18519 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i32 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
18520 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
18521 // CHECK11:       omp_offload.cont68:
18522 // CHECK11-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
18523 // CHECK11-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
18524 // CHECK11-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
18525 // CHECK11-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
18526 // CHECK11-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
18527 // CHECK11-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
18528 // CHECK11-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 4
18529 // CHECK11-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 4
18530 // CHECK11-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 4
18531 // CHECK11-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
18532 // CHECK11-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
18533 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
18534 // CHECK11-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
18535 // CHECK11-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
18536 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
18537 // CHECK11-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
18538 // CHECK11-NEXT:    store i8* null, i8** [[TMP223]], align 4
18539 // CHECK11-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
18540 // CHECK11-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
18541 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
18542 // CHECK11-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
18543 // CHECK11-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
18544 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
18545 // CHECK11-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
18546 // CHECK11-NEXT:    store i8* null, i8** [[TMP228]], align 4
18547 // CHECK11-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
18548 // CHECK11-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
18549 // CHECK11-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 4
18550 // CHECK11-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
18551 // CHECK11-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
18552 // CHECK11-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 4
18553 // CHECK11-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
18554 // CHECK11-NEXT:    store i8* null, i8** [[TMP233]], align 4
18555 // CHECK11-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
18556 // CHECK11-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
18557 // CHECK11-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 4
18558 // CHECK11-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
18559 // CHECK11-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
18560 // CHECK11-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 4
18561 // CHECK11-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
18562 // CHECK11-NEXT:    store i8* null, i8** [[TMP238]], align 4
18563 // CHECK11-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
18564 // CHECK11-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
18565 // CHECK11-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 4
18566 // CHECK11-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
18567 // CHECK11-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
18568 // CHECK11-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 4
18569 // CHECK11-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
18570 // CHECK11-NEXT:    store i8* null, i8** [[TMP243]], align 4
18571 // CHECK11-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
18572 // CHECK11-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
18573 // CHECK11-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
18574 // CHECK11-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
18575 // CHECK11-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
18576 // CHECK11-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
18577 // CHECK11-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
18578 // CHECK11-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
18579 // CHECK11-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
18580 // CHECK11-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
18581 // CHECK11-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
18582 // CHECK11-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
18583 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
18584 // CHECK11-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18585 // CHECK11-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
18586 // CHECK11-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
18587 // CHECK11:       omp_offload.failed81:
18588 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i32 [[TMP213]], i32 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
18589 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
18590 // CHECK11:       omp_offload.cont82:
18591 // CHECK11-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
18592 // CHECK11-NEXT:    ret i32 [[CALL]]
18593 //
18594 //
18595 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
18596 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
18597 // CHECK11-NEXT:  entry:
18598 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18599 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
18600 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
18601 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
18602 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18603 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
18604 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
18605 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
18606 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
18607 // CHECK11-NEXT:    ret void
18608 //
18609 //
18610 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
18611 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18612 // CHECK11-NEXT:  entry:
18613 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18614 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18615 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18616 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18617 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18618 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18619 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18620 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18621 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18622 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18623 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18624 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18625 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18626 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18627 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18628 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18629 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18630 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18631 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18632 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18633 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18634 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18635 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18636 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18637 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18638 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18639 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
18640 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18641 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18642 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
18643 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18644 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18645 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18646 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
18647 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18648 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
18649 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18650 // CHECK11:       omp.precond.then:
18651 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18652 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18653 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
18654 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18655 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18656 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18657 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
18658 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18659 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18660 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18661 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
18662 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18663 // CHECK11:       cond.true:
18664 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18665 // CHECK11-NEXT:    br label [[COND_END:%.*]]
18666 // CHECK11:       cond.false:
18667 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18668 // CHECK11-NEXT:    br label [[COND_END]]
18669 // CHECK11:       cond.end:
18670 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
18671 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18672 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18673 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
18674 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18675 // CHECK11:       omp.inner.for.cond:
18676 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18677 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
18678 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
18679 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18680 // CHECK11:       omp.inner.for.body:
18681 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
18682 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
18683 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !18
18684 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18685 // CHECK11:       omp.inner.for.inc:
18686 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18687 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
18688 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
18689 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18690 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
18691 // CHECK11:       omp.inner.for.end:
18692 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18693 // CHECK11:       omp.loop.exit:
18694 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18695 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
18696 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
18697 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18698 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
18699 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18700 // CHECK11:       .omp.final.then:
18701 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18702 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
18703 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
18704 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
18705 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
18706 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
18707 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18708 // CHECK11:       .omp.final.done:
18709 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
18710 // CHECK11:       omp.precond.end:
18711 // CHECK11-NEXT:    ret void
18712 //
18713 //
18714 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
18715 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18716 // CHECK11-NEXT:  entry:
18717 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18718 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18719 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18720 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18721 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18722 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18723 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18724 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18725 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18726 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18727 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18728 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18729 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18730 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18731 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18732 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18733 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18734 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18735 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18736 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18737 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18738 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18739 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18740 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18741 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18742 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18743 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18744 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18745 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18746 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18747 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
18748 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18749 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18750 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
18751 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18752 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18753 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18754 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
18755 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18756 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
18757 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18758 // CHECK11:       omp.precond.then:
18759 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18760 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18761 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
18762 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18763 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18764 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
18765 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
18766 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18767 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18768 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18769 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
18770 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18771 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18772 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18773 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
18774 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18775 // CHECK11:       cond.true:
18776 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18777 // CHECK11-NEXT:    br label [[COND_END:%.*]]
18778 // CHECK11:       cond.false:
18779 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18780 // CHECK11-NEXT:    br label [[COND_END]]
18781 // CHECK11:       cond.end:
18782 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
18783 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18784 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18785 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
18786 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18787 // CHECK11:       omp.inner.for.cond:
18788 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18789 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
18790 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
18791 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18792 // CHECK11:       omp.inner.for.body:
18793 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18794 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
18795 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18796 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !22
18797 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !22
18798 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
18799 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
18800 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !22
18801 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !22
18802 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
18803 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
18804 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !22
18805 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
18806 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !22
18807 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
18808 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
18809 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !22
18810 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18811 // CHECK11:       omp.body.continue:
18812 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18813 // CHECK11:       omp.inner.for.inc:
18814 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18815 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
18816 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18817 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
18818 // CHECK11:       omp.inner.for.end:
18819 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18820 // CHECK11:       omp.loop.exit:
18821 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18822 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
18823 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
18824 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18825 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
18826 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18827 // CHECK11:       .omp.final.then:
18828 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18829 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
18830 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
18831 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
18832 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
18833 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
18834 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18835 // CHECK11:       .omp.final.done:
18836 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
18837 // CHECK11:       omp.precond.end:
18838 // CHECK11-NEXT:    ret void
18839 //
18840 //
18841 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
18842 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
18843 // CHECK11-NEXT:  entry:
18844 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18845 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
18846 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
18847 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
18848 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18849 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
18850 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
18851 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
18852 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
18853 // CHECK11-NEXT:    ret void
18854 //
18855 //
18856 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2
18857 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18858 // CHECK11-NEXT:  entry:
18859 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18860 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18861 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18862 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18863 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18864 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18865 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18866 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18867 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18868 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18869 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18870 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18871 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18872 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18873 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18874 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18875 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18876 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18877 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18878 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18879 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18880 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18881 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18882 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18883 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18884 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18885 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
18886 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18887 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18888 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
18889 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18890 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18891 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18892 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
18893 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18894 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
18895 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18896 // CHECK11:       omp.precond.then:
18897 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18898 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18899 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
18900 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18901 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18902 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18903 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
18904 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18905 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18906 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18907 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
18908 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18909 // CHECK11:       cond.true:
18910 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18911 // CHECK11-NEXT:    br label [[COND_END:%.*]]
18912 // CHECK11:       cond.false:
18913 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18914 // CHECK11-NEXT:    br label [[COND_END]]
18915 // CHECK11:       cond.end:
18916 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
18917 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18918 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18919 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
18920 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18921 // CHECK11:       omp.inner.for.cond:
18922 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
18923 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
18924 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
18925 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18926 // CHECK11:       omp.inner.for.body:
18927 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !27
18928 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
18929 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !27
18930 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18931 // CHECK11:       omp.inner.for.inc:
18932 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
18933 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !27
18934 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
18935 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
18936 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
18937 // CHECK11:       omp.inner.for.end:
18938 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18939 // CHECK11:       omp.loop.exit:
18940 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18941 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
18942 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
18943 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18944 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
18945 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18946 // CHECK11:       .omp.final.then:
18947 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18948 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
18949 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
18950 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
18951 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
18952 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
18953 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18954 // CHECK11:       .omp.final.done:
18955 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
18956 // CHECK11:       omp.precond.end:
18957 // CHECK11-NEXT:    ret void
18958 //
18959 //
18960 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
18961 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18962 // CHECK11-NEXT:  entry:
18963 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18964 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18965 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18966 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18967 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18968 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18969 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18970 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18971 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18972 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18973 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18974 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18975 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18976 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18977 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18978 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18979 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18980 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18981 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18982 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18983 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18984 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18985 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18986 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18987 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18988 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18989 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18990 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18991 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18992 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18993 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
18994 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18995 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18996 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
18997 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18998 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18999 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19000 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19001 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19002 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19003 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19004 // CHECK11:       omp.precond.then:
19005 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19006 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19007 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19008 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19009 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19010 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19011 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19012 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19013 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19014 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19015 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19016 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19017 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19018 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19019 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19020 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19021 // CHECK11:       cond.true:
19022 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19023 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19024 // CHECK11:       cond.false:
19025 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19026 // CHECK11-NEXT:    br label [[COND_END]]
19027 // CHECK11:       cond.end:
19028 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19029 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19030 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19031 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19032 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19033 // CHECK11:       omp.inner.for.cond:
19034 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19035 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
19036 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19037 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19038 // CHECK11:       omp.inner.for.body:
19039 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19040 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
19041 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19042 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !30
19043 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !30
19044 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
19045 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
19046 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !30
19047 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !30
19048 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
19049 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19050 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !30
19051 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
19052 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !30
19053 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
19054 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19055 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !30
19056 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19057 // CHECK11:       omp.body.continue:
19058 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19059 // CHECK11:       omp.inner.for.inc:
19060 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19061 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
19062 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19063 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
19064 // CHECK11:       omp.inner.for.end:
19065 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19066 // CHECK11:       omp.loop.exit:
19067 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19068 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
19069 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
19070 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19071 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
19072 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19073 // CHECK11:       .omp.final.then:
19074 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19075 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
19076 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
19077 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
19078 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
19079 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
19080 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19081 // CHECK11:       .omp.final.done:
19082 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19083 // CHECK11:       omp.precond.end:
19084 // CHECK11-NEXT:    ret void
19085 //
19086 //
19087 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
19088 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19089 // CHECK11-NEXT:  entry:
19090 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
19091 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19092 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19093 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19094 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19095 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
19096 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19097 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19098 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19099 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19100 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19101 // CHECK11-NEXT:    ret void
19102 //
19103 //
19104 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6
19105 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19106 // CHECK11-NEXT:  entry:
19107 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19108 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19109 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
19110 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19111 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19112 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19113 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19114 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19115 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19116 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19117 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19118 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19119 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19120 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19121 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19122 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19123 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19124 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19125 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19126 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
19127 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19128 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19129 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19130 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19131 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
19132 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19133 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
19134 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
19135 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
19136 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
19137 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
19138 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19139 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
19140 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19141 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19142 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19143 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19144 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19145 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
19146 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19147 // CHECK11:       omp.precond.then:
19148 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19149 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19150 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
19151 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19152 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19153 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
19154 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19155 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19156 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
19157 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19158 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19159 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19160 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19161 // CHECK11:       cond.true:
19162 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19163 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19164 // CHECK11:       cond.false:
19165 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19166 // CHECK11-NEXT:    br label [[COND_END]]
19167 // CHECK11:       cond.end:
19168 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19169 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19170 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19171 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19172 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19173 // CHECK11:       omp.inner.for.cond:
19174 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19175 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
19176 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
19177 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
19178 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19179 // CHECK11:       omp.inner.for.body:
19180 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19181 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19182 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !33
19183 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19184 // CHECK11:       omp.inner.for.inc:
19185 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19186 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
19187 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
19188 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19189 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19190 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
19191 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
19192 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19193 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19194 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
19195 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
19196 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19197 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19198 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
19199 // CHECK11-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
19200 // CHECK11-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
19201 // CHECK11:       cond.true10:
19202 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
19203 // CHECK11-NEXT:    br label [[COND_END12:%.*]]
19204 // CHECK11:       cond.false11:
19205 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19206 // CHECK11-NEXT:    br label [[COND_END12]]
19207 // CHECK11:       cond.end12:
19208 // CHECK11-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
19209 // CHECK11-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19210 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19211 // CHECK11-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19212 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
19213 // CHECK11:       omp.inner.for.end:
19214 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19215 // CHECK11:       omp.loop.exit:
19216 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19217 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
19218 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
19219 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19220 // CHECK11-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
19221 // CHECK11-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19222 // CHECK11:       .omp.final.then:
19223 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19224 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
19225 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
19226 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
19227 // CHECK11-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
19228 // CHECK11-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
19229 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19230 // CHECK11:       .omp.final.done:
19231 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19232 // CHECK11:       omp.precond.end:
19233 // CHECK11-NEXT:    ret void
19234 //
19235 //
19236 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7
19237 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19238 // CHECK11-NEXT:  entry:
19239 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19240 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19241 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19242 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19243 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19244 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19245 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19246 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19247 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19248 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19249 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19250 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19251 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19252 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19253 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19254 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19255 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19256 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19257 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19258 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19259 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19260 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19261 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19262 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19263 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19264 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19265 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19266 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19267 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19268 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19269 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19270 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19271 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19272 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19273 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19274 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19275 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19276 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19277 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19278 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19279 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19280 // CHECK11:       omp.precond.then:
19281 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19282 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19283 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19284 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19285 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19286 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19287 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19288 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19289 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19290 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19291 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19292 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19293 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19294 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19295 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19296 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19297 // CHECK11:       cond.true:
19298 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19299 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19300 // CHECK11:       cond.false:
19301 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19302 // CHECK11-NEXT:    br label [[COND_END]]
19303 // CHECK11:       cond.end:
19304 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19305 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19306 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19307 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19308 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19309 // CHECK11:       omp.inner.for.cond:
19310 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19311 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
19312 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19313 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19314 // CHECK11:       omp.inner.for.body:
19315 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19316 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
19317 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19318 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !36
19319 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !36
19320 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
19321 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
19322 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !36
19323 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !36
19324 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
19325 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19326 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !36
19327 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
19328 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !36
19329 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
19330 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19331 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !36
19332 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19333 // CHECK11:       omp.body.continue:
19334 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19335 // CHECK11:       omp.inner.for.inc:
19336 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19337 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
19338 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19339 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
19340 // CHECK11:       omp.inner.for.end:
19341 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19342 // CHECK11:       omp.loop.exit:
19343 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19344 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
19345 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
19346 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19347 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
19348 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19349 // CHECK11:       .omp.final.then:
19350 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19351 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
19352 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
19353 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
19354 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
19355 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
19356 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19357 // CHECK11:       .omp.final.done:
19358 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19359 // CHECK11:       omp.precond.end:
19360 // CHECK11-NEXT:    ret void
19361 //
19362 //
19363 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
19364 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19365 // CHECK11-NEXT:  entry:
19366 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19367 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19368 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19369 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19370 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19371 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19372 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19373 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19374 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19375 // CHECK11-NEXT:    ret void
19376 //
19377 //
19378 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10
19379 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19380 // CHECK11-NEXT:  entry:
19381 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19382 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19383 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19384 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19385 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19386 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19387 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19388 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19389 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19390 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19391 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19392 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19393 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19394 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19395 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19396 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19397 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19398 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19399 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19400 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19401 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19402 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19403 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19404 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19405 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19406 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19407 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19408 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19409 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19410 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19411 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19412 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19413 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19414 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19415 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19416 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19417 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19418 // CHECK11:       omp.precond.then:
19419 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19420 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19421 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
19422 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19423 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19424 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19425 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19426 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19427 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19428 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19429 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
19430 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19431 // CHECK11:       cond.true:
19432 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19433 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19434 // CHECK11:       cond.false:
19435 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19436 // CHECK11-NEXT:    br label [[COND_END]]
19437 // CHECK11:       cond.end:
19438 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
19439 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19440 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19441 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
19442 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19443 // CHECK11:       omp.inner.for.cond:
19444 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
19445 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
19446 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
19447 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19448 // CHECK11:       omp.inner.for.body:
19449 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39
19450 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
19451 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !39
19452 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19453 // CHECK11:       omp.inner.for.inc:
19454 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
19455 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39
19456 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
19457 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
19458 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
19459 // CHECK11:       omp.inner.for.end:
19460 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19461 // CHECK11:       omp.loop.exit:
19462 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19463 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
19464 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
19465 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19466 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
19467 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19468 // CHECK11:       .omp.final.then:
19469 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19470 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
19471 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
19472 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
19473 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
19474 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
19475 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19476 // CHECK11:       .omp.final.done:
19477 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19478 // CHECK11:       omp.precond.end:
19479 // CHECK11-NEXT:    ret void
19480 //
19481 //
19482 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11
19483 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19484 // CHECK11-NEXT:  entry:
19485 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19486 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19487 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19488 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19489 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19490 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19491 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19492 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19493 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19494 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19495 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19496 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19497 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19498 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19499 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19500 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19501 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19502 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19503 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19504 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19505 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19506 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19507 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19508 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19509 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19510 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19511 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19512 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19513 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19514 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19515 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19516 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19517 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19518 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19519 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19520 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19521 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19522 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19523 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19524 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19525 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19526 // CHECK11:       omp.precond.then:
19527 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19528 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19529 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19530 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19531 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19532 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19533 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19534 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19535 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19536 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19537 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19538 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19539 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19540 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19541 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19542 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19543 // CHECK11:       cond.true:
19544 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19545 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19546 // CHECK11:       cond.false:
19547 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19548 // CHECK11-NEXT:    br label [[COND_END]]
19549 // CHECK11:       cond.end:
19550 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19551 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19552 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19553 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19554 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19555 // CHECK11:       omp.inner.for.cond:
19556 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19557 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
19558 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19559 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19560 // CHECK11:       omp.inner.for.body:
19561 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19562 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
19563 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19564 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !42
19565 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !42
19566 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
19567 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
19568 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !42
19569 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !42
19570 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
19571 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19572 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !42
19573 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
19574 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !42
19575 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
19576 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19577 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !42
19578 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19579 // CHECK11:       omp.body.continue:
19580 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19581 // CHECK11:       omp.inner.for.inc:
19582 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19583 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
19584 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19585 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
19586 // CHECK11:       omp.inner.for.end:
19587 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19588 // CHECK11:       omp.loop.exit:
19589 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19590 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
19591 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
19592 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19593 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
19594 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19595 // CHECK11:       .omp.final.then:
19596 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19597 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
19598 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
19599 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
19600 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
19601 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
19602 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19603 // CHECK11:       .omp.final.done:
19604 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19605 // CHECK11:       omp.precond.end:
19606 // CHECK11-NEXT:    ret void
19607 //
19608 //
19609 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
19610 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19611 // CHECK11-NEXT:  entry:
19612 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
19613 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19614 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19615 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19616 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19617 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
19618 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19619 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19620 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19621 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19622 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19623 // CHECK11-NEXT:    ret void
19624 //
19625 //
19626 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14
19627 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19628 // CHECK11-NEXT:  entry:
19629 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19630 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19631 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
19632 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19633 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19634 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19635 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19636 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19637 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19638 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19639 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19640 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
19641 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19642 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19643 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19644 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19645 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19646 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
19647 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
19648 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19649 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19650 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
19651 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19652 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19653 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19654 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19655 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
19656 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19657 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
19658 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
19659 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
19660 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
19661 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
19662 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
19663 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19664 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19665 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
19666 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19667 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
19668 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
19669 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19670 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19671 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
19672 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19673 // CHECK11:       omp.precond.then:
19674 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19675 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19676 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
19677 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19678 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19679 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19680 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19681 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19682 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19683 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19684 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19685 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19686 // CHECK11:       cond.true:
19687 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19688 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19689 // CHECK11:       cond.false:
19690 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19691 // CHECK11-NEXT:    br label [[COND_END]]
19692 // CHECK11:       cond.end:
19693 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19694 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19695 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19696 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19697 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19698 // CHECK11:       omp.inner.for.cond:
19699 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
19700 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
19701 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19702 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19703 // CHECK11:       omp.inner.for.body:
19704 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !45
19705 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
19706 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
19707 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
19708 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
19709 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !45
19710 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19711 // CHECK11:       omp.inner.for.inc:
19712 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
19713 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !45
19714 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
19715 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
19716 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
19717 // CHECK11:       omp.inner.for.end:
19718 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19719 // CHECK11:       omp.loop.exit:
19720 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19721 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
19722 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
19723 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19724 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
19725 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19726 // CHECK11:       .omp.final.then:
19727 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19728 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
19729 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
19730 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
19731 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
19732 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
19733 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19734 // CHECK11:       .omp.final.done:
19735 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19736 // CHECK11:       omp.precond.end:
19737 // CHECK11-NEXT:    ret void
19738 //
19739 //
19740 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15
19741 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
19742 // CHECK11-NEXT:  entry:
19743 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19744 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19745 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19746 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19747 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19748 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19749 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19750 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19751 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19752 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19753 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19754 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19755 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
19756 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19757 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19758 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19759 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19760 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19761 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
19762 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19763 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19764 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19765 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19766 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19767 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19768 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19769 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19770 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19771 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19772 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19773 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19774 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19775 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19776 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19777 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19778 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19779 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19780 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
19781 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
19782 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19783 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19784 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19785 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19786 // CHECK11:       omp.precond.then:
19787 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19788 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19789 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19790 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19791 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19792 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19793 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19794 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19795 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19796 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19797 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19798 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
19799 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
19800 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
19801 // CHECK11:       omp.dispatch.cond:
19802 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19803 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19804 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
19805 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19806 // CHECK11:       cond.true:
19807 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19808 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19809 // CHECK11:       cond.false:
19810 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19811 // CHECK11-NEXT:    br label [[COND_END]]
19812 // CHECK11:       cond.end:
19813 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
19814 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19815 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19816 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
19817 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19818 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19819 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
19820 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
19821 // CHECK11:       omp.dispatch.body:
19822 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19823 // CHECK11:       omp.inner.for.cond:
19824 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19825 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
19826 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
19827 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19828 // CHECK11:       omp.inner.for.body:
19829 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19830 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
19831 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19832 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !48
19833 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !48
19834 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
19835 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19836 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !48
19837 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !48
19838 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
19839 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19840 // CHECK11-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !48
19841 // CHECK11-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
19842 // CHECK11-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !48
19843 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
19844 // CHECK11-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
19845 // CHECK11-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !48
19846 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19847 // CHECK11:       omp.body.continue:
19848 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19849 // CHECK11:       omp.inner.for.inc:
19850 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19851 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
19852 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19853 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
19854 // CHECK11:       omp.inner.for.end:
19855 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
19856 // CHECK11:       omp.dispatch.inc:
19857 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19858 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19859 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
19860 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
19861 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19862 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19863 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
19864 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
19865 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
19866 // CHECK11:       omp.dispatch.end:
19867 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19868 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
19869 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
19870 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19871 // CHECK11-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
19872 // CHECK11-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19873 // CHECK11:       .omp.final.then:
19874 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19875 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
19876 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
19877 // CHECK11-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
19878 // CHECK11-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
19879 // CHECK11-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
19880 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19881 // CHECK11:       .omp.final.done:
19882 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19883 // CHECK11:       omp.precond.end:
19884 // CHECK11-NEXT:    ret void
19885 //
19886 //
19887 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
19888 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19889 // CHECK11-NEXT:  entry:
19890 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19891 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19892 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19893 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19894 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19895 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19896 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19897 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19898 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19899 // CHECK11-NEXT:    ret void
19900 //
19901 //
19902 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18
19903 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19904 // CHECK11-NEXT:  entry:
19905 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19906 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19907 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19908 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19909 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19910 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19911 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19912 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19913 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19914 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19915 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19916 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19917 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19918 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19919 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19920 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19921 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19922 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19923 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19924 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19925 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19926 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19927 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19928 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19929 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19930 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19931 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19932 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19933 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19934 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19935 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19936 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19937 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19938 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19939 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19940 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19941 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19942 // CHECK11:       omp.precond.then:
19943 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19944 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19945 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
19946 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19947 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19948 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19949 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19950 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19951 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19952 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19953 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
19954 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19955 // CHECK11:       cond.true:
19956 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19957 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19958 // CHECK11:       cond.false:
19959 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19960 // CHECK11-NEXT:    br label [[COND_END]]
19961 // CHECK11:       cond.end:
19962 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
19963 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19964 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19965 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
19966 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19967 // CHECK11:       omp.inner.for.cond:
19968 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
19969 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
19970 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
19971 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19972 // CHECK11:       omp.inner.for.body:
19973 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51
19974 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
19975 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !51
19976 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19977 // CHECK11:       omp.inner.for.inc:
19978 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
19979 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51
19980 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
19981 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
19982 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]]
19983 // CHECK11:       omp.inner.for.end:
19984 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19985 // CHECK11:       omp.loop.exit:
19986 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19987 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
19988 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
19989 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19990 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
19991 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19992 // CHECK11:       .omp.final.then:
19993 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19994 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
19995 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
19996 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
19997 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
19998 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
19999 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20000 // CHECK11:       .omp.final.done:
20001 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20002 // CHECK11:       omp.precond.end:
20003 // CHECK11-NEXT:    ret void
20004 //
20005 //
20006 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19
20007 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
20008 // CHECK11-NEXT:  entry:
20009 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20010 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20011 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
20012 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
20013 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20014 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
20015 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
20016 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
20017 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20018 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20019 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20020 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20021 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20022 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20023 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20024 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20025 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20026 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
20027 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20028 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20029 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20030 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20031 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20032 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
20033 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
20034 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
20035 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20036 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
20037 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
20038 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
20039 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
20040 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
20041 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20042 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
20043 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20044 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20045 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20046 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20047 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20048 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
20049 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20050 // CHECK11:       omp.precond.then:
20051 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20052 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20053 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
20054 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20055 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20056 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
20057 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
20058 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20059 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20060 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20061 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20062 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20063 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
20064 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
20065 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
20066 // CHECK11:       omp.dispatch.cond:
20067 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20068 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
20069 // CHECK11-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
20070 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
20071 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
20072 // CHECK11:       omp.dispatch.body:
20073 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20074 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
20075 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20076 // CHECK11:       omp.inner.for.cond:
20077 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20078 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54
20079 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
20080 // CHECK11-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20081 // CHECK11:       omp.inner.for.body:
20082 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20083 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
20084 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20085 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !54
20086 // CHECK11-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !54
20087 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
20088 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
20089 // CHECK11-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !54
20090 // CHECK11-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !54
20091 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
20092 // CHECK11-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
20093 // CHECK11-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !54
20094 // CHECK11-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
20095 // CHECK11-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !54
20096 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
20097 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
20098 // CHECK11-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !54
20099 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20100 // CHECK11:       omp.body.continue:
20101 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20102 // CHECK11:       omp.inner.for.inc:
20103 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20104 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
20105 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20106 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]]
20107 // CHECK11:       omp.inner.for.end:
20108 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
20109 // CHECK11:       omp.dispatch.inc:
20110 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
20111 // CHECK11:       omp.dispatch.end:
20112 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20113 // CHECK11-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
20114 // CHECK11-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20115 // CHECK11:       .omp.final.then:
20116 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20117 // CHECK11-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
20118 // CHECK11-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
20119 // CHECK11-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
20120 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
20121 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
20122 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20123 // CHECK11:       .omp.final.done:
20124 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20125 // CHECK11:       omp.precond.end:
20126 // CHECK11-NEXT:    ret void
20127 //
20128 //
20129 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
20130 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
20131 // CHECK11-NEXT:  entry:
20132 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
20133 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
20134 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
20135 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
20136 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
20137 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
20138 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
20139 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
20140 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
20141 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
20142 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
20143 // CHECK11-NEXT:    ret void
20144 //
20145 //
20146 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..22
20147 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
20148 // CHECK11-NEXT:  entry:
20149 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20150 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20151 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
20152 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20153 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
20154 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
20155 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
20156 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20157 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20158 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20159 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20160 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20161 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20162 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20163 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20164 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20165 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20166 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
20167 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
20168 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20169 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20170 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
20171 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20172 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
20173 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
20174 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
20175 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
20176 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20177 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
20178 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
20179 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
20180 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
20181 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
20182 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
20183 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20184 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20185 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
20186 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20187 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
20188 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20189 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20190 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20191 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
20192 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20193 // CHECK11:       omp.precond.then:
20194 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20195 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20196 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
20197 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20198 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20199 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20200 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
20201 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20202 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20203 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20204 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
20205 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20206 // CHECK11:       cond.true:
20207 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20208 // CHECK11-NEXT:    br label [[COND_END:%.*]]
20209 // CHECK11:       cond.false:
20210 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20211 // CHECK11-NEXT:    br label [[COND_END]]
20212 // CHECK11:       cond.end:
20213 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
20214 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20215 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20216 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
20217 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20218 // CHECK11:       omp.inner.for.cond:
20219 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
20220 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
20221 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
20222 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20223 // CHECK11:       omp.inner.for.body:
20224 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !57
20225 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
20226 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !57
20227 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
20228 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
20229 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !57
20230 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20231 // CHECK11:       omp.inner.for.inc:
20232 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
20233 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !57
20234 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
20235 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
20236 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]]
20237 // CHECK11:       omp.inner.for.end:
20238 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20239 // CHECK11:       omp.loop.exit:
20240 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20241 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
20242 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
20243 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20244 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
20245 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20246 // CHECK11:       .omp.final.then:
20247 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20248 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
20249 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
20250 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
20251 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
20252 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
20253 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20254 // CHECK11:       .omp.final.done:
20255 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20256 // CHECK11:       omp.precond.end:
20257 // CHECK11-NEXT:    ret void
20258 //
20259 //
20260 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..23
20261 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
20262 // CHECK11-NEXT:  entry:
20263 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20264 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20265 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
20266 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
20267 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20268 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
20269 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
20270 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
20271 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
20272 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20273 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20274 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20275 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20276 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20277 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20278 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20279 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20280 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20281 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
20282 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20283 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20284 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20285 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20286 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20287 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
20288 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
20289 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
20290 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
20291 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20292 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
20293 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
20294 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
20295 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
20296 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20297 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20298 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
20299 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20300 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
20301 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20302 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20303 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20304 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
20305 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20306 // CHECK11:       omp.precond.then:
20307 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20308 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20309 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
20310 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20311 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20312 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
20313 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
20314 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20315 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20316 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
20317 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20318 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20319 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20320 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
20321 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
20322 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
20323 // CHECK11:       omp.dispatch.cond:
20324 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20325 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
20326 // CHECK11-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
20327 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
20328 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
20329 // CHECK11:       omp.dispatch.body:
20330 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20331 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
20332 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20333 // CHECK11:       omp.inner.for.cond:
20334 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20335 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !60
20336 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
20337 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20338 // CHECK11:       omp.inner.for.body:
20339 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20340 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
20341 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20342 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !60
20343 // CHECK11-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !60
20344 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
20345 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
20346 // CHECK11-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !60
20347 // CHECK11-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !60
20348 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
20349 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
20350 // CHECK11-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !60
20351 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
20352 // CHECK11-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !60
20353 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
20354 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
20355 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !60
20356 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20357 // CHECK11:       omp.body.continue:
20358 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20359 // CHECK11:       omp.inner.for.inc:
20360 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20361 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
20362 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20363 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]]
20364 // CHECK11:       omp.inner.for.end:
20365 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
20366 // CHECK11:       omp.dispatch.inc:
20367 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
20368 // CHECK11:       omp.dispatch.end:
20369 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20370 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
20371 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20372 // CHECK11:       .omp.final.then:
20373 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20374 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
20375 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
20376 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
20377 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
20378 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
20379 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20380 // CHECK11:       .omp.final.done:
20381 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20382 // CHECK11:       omp.precond.end:
20383 // CHECK11-NEXT:    ret void
20384 //
20385 //
20386 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
20387 // CHECK11-SAME: () #[[ATTR3:[0-9]+]] comdat {
20388 // CHECK11-NEXT:  entry:
20389 // CHECK11-NEXT:    [[A:%.*]] = alloca i32*, align 4
20390 // CHECK11-NEXT:    [[B:%.*]] = alloca i32*, align 4
20391 // CHECK11-NEXT:    [[C:%.*]] = alloca i32*, align 4
20392 // CHECK11-NEXT:    [[N:%.*]] = alloca i32, align 4
20393 // CHECK11-NEXT:    [[CH:%.*]] = alloca i32, align 4
20394 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
20395 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
20396 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
20397 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
20398 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20399 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20400 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20401 // CHECK11-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
20402 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
20403 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
20404 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
20405 // CHECK11-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
20406 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
20407 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
20408 // CHECK11-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
20409 // CHECK11-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
20410 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
20411 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
20412 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
20413 // CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
20414 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
20415 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
20416 // CHECK11-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
20417 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
20418 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
20419 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
20420 // CHECK11-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
20421 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
20422 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
20423 // CHECK11-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
20424 // CHECK11-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
20425 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
20426 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
20427 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
20428 // CHECK11-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
20429 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
20430 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
20431 // CHECK11-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
20432 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
20433 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
20434 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
20435 // CHECK11-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
20436 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
20437 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
20438 // CHECK11-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
20439 // CHECK11-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
20440 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
20441 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
20442 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
20443 // CHECK11-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
20444 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
20445 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
20446 // CHECK11-NEXT:    store i32 10000, i32* [[N]], align 4
20447 // CHECK11-NEXT:    store i32 100, i32* [[CH]], align 4
20448 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
20449 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
20450 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
20451 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 4
20452 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 4
20453 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 4
20454 // CHECK11-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
20455 // CHECK11-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
20456 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
20457 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
20458 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
20459 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
20460 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
20461 // CHECK11-NEXT:    store i8* null, i8** [[TMP9]], align 4
20462 // CHECK11-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
20463 // CHECK11-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
20464 // CHECK11-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 4
20465 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
20466 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
20467 // CHECK11-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 4
20468 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
20469 // CHECK11-NEXT:    store i8* null, i8** [[TMP14]], align 4
20470 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
20471 // CHECK11-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
20472 // CHECK11-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 4
20473 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
20474 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
20475 // CHECK11-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 4
20476 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
20477 // CHECK11-NEXT:    store i8* null, i8** [[TMP19]], align 4
20478 // CHECK11-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
20479 // CHECK11-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
20480 // CHECK11-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 4
20481 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
20482 // CHECK11-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
20483 // CHECK11-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 4
20484 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
20485 // CHECK11-NEXT:    store i8* null, i8** [[TMP24]], align 4
20486 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
20487 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
20488 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
20489 // CHECK11-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
20490 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20491 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
20492 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20493 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20494 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20495 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20496 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
20497 // CHECK11-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
20498 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
20499 // CHECK11-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20500 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
20501 // CHECK11-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
20502 // CHECK11:       omp_offload.failed:
20503 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
20504 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
20505 // CHECK11:       omp_offload.cont:
20506 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
20507 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
20508 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
20509 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 4
20510 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 4
20511 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 4
20512 // CHECK11-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
20513 // CHECK11-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
20514 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
20515 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
20516 // CHECK11-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
20517 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
20518 // CHECK11-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
20519 // CHECK11-NEXT:    store i8* null, i8** [[TMP42]], align 4
20520 // CHECK11-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
20521 // CHECK11-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
20522 // CHECK11-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 4
20523 // CHECK11-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
20524 // CHECK11-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
20525 // CHECK11-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 4
20526 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
20527 // CHECK11-NEXT:    store i8* null, i8** [[TMP47]], align 4
20528 // CHECK11-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
20529 // CHECK11-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
20530 // CHECK11-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 4
20531 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
20532 // CHECK11-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
20533 // CHECK11-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 4
20534 // CHECK11-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
20535 // CHECK11-NEXT:    store i8* null, i8** [[TMP52]], align 4
20536 // CHECK11-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
20537 // CHECK11-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
20538 // CHECK11-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 4
20539 // CHECK11-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
20540 // CHECK11-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
20541 // CHECK11-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 4
20542 // CHECK11-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
20543 // CHECK11-NEXT:    store i8* null, i8** [[TMP57]], align 4
20544 // CHECK11-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
20545 // CHECK11-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
20546 // CHECK11-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
20547 // CHECK11-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
20548 // CHECK11-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
20549 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
20550 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
20551 // CHECK11-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
20552 // CHECK11-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
20553 // CHECK11-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
20554 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
20555 // CHECK11-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
20556 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
20557 // CHECK11-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20558 // CHECK11-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
20559 // CHECK11-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
20560 // CHECK11:       omp_offload.failed14:
20561 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i32 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
20562 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
20563 // CHECK11:       omp_offload.cont15:
20564 // CHECK11-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
20565 // CHECK11-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
20566 // CHECK11-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
20567 // CHECK11-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
20568 // CHECK11-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
20569 // CHECK11-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
20570 // CHECK11-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 4
20571 // CHECK11-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 4
20572 // CHECK11-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 4
20573 // CHECK11-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
20574 // CHECK11-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
20575 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
20576 // CHECK11-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
20577 // CHECK11-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
20578 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
20579 // CHECK11-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
20580 // CHECK11-NEXT:    store i8* null, i8** [[TMP77]], align 4
20581 // CHECK11-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
20582 // CHECK11-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
20583 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
20584 // CHECK11-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
20585 // CHECK11-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
20586 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
20587 // CHECK11-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
20588 // CHECK11-NEXT:    store i8* null, i8** [[TMP82]], align 4
20589 // CHECK11-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
20590 // CHECK11-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
20591 // CHECK11-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 4
20592 // CHECK11-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
20593 // CHECK11-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
20594 // CHECK11-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 4
20595 // CHECK11-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
20596 // CHECK11-NEXT:    store i8* null, i8** [[TMP87]], align 4
20597 // CHECK11-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
20598 // CHECK11-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
20599 // CHECK11-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 4
20600 // CHECK11-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
20601 // CHECK11-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
20602 // CHECK11-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 4
20603 // CHECK11-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
20604 // CHECK11-NEXT:    store i8* null, i8** [[TMP92]], align 4
20605 // CHECK11-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
20606 // CHECK11-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
20607 // CHECK11-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 4
20608 // CHECK11-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
20609 // CHECK11-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
20610 // CHECK11-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 4
20611 // CHECK11-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
20612 // CHECK11-NEXT:    store i8* null, i8** [[TMP97]], align 4
20613 // CHECK11-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
20614 // CHECK11-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
20615 // CHECK11-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
20616 // CHECK11-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
20617 // CHECK11-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
20618 // CHECK11-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
20619 // CHECK11-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
20620 // CHECK11-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
20621 // CHECK11-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
20622 // CHECK11-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
20623 // CHECK11-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
20624 // CHECK11-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
20625 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
20626 // CHECK11-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20627 // CHECK11-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
20628 // CHECK11-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
20629 // CHECK11:       omp_offload.failed27:
20630 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i32 [[TMP67]], i32 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
20631 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
20632 // CHECK11:       omp_offload.cont28:
20633 // CHECK11-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
20634 // CHECK11-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
20635 // CHECK11-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
20636 // CHECK11-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 4
20637 // CHECK11-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 4
20638 // CHECK11-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 4
20639 // CHECK11-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
20640 // CHECK11-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
20641 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
20642 // CHECK11-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
20643 // CHECK11-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
20644 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
20645 // CHECK11-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
20646 // CHECK11-NEXT:    store i8* null, i8** [[TMP115]], align 4
20647 // CHECK11-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
20648 // CHECK11-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
20649 // CHECK11-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 4
20650 // CHECK11-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
20651 // CHECK11-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
20652 // CHECK11-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 4
20653 // CHECK11-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
20654 // CHECK11-NEXT:    store i8* null, i8** [[TMP120]], align 4
20655 // CHECK11-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
20656 // CHECK11-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
20657 // CHECK11-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 4
20658 // CHECK11-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
20659 // CHECK11-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
20660 // CHECK11-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 4
20661 // CHECK11-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
20662 // CHECK11-NEXT:    store i8* null, i8** [[TMP125]], align 4
20663 // CHECK11-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
20664 // CHECK11-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
20665 // CHECK11-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 4
20666 // CHECK11-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
20667 // CHECK11-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
20668 // CHECK11-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 4
20669 // CHECK11-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
20670 // CHECK11-NEXT:    store i8* null, i8** [[TMP130]], align 4
20671 // CHECK11-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
20672 // CHECK11-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
20673 // CHECK11-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
20674 // CHECK11-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
20675 // CHECK11-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
20676 // CHECK11-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
20677 // CHECK11-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
20678 // CHECK11-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
20679 // CHECK11-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
20680 // CHECK11-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
20681 // CHECK11-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
20682 // CHECK11-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
20683 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
20684 // CHECK11-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20685 // CHECK11-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
20686 // CHECK11-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
20687 // CHECK11:       omp_offload.failed40:
20688 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i32 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
20689 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
20690 // CHECK11:       omp_offload.cont41:
20691 // CHECK11-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
20692 // CHECK11-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
20693 // CHECK11-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
20694 // CHECK11-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
20695 // CHECK11-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
20696 // CHECK11-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
20697 // CHECK11-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 4
20698 // CHECK11-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 4
20699 // CHECK11-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 4
20700 // CHECK11-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
20701 // CHECK11-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
20702 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
20703 // CHECK11-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
20704 // CHECK11-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
20705 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
20706 // CHECK11-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
20707 // CHECK11-NEXT:    store i8* null, i8** [[TMP150]], align 4
20708 // CHECK11-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
20709 // CHECK11-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
20710 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
20711 // CHECK11-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
20712 // CHECK11-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
20713 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
20714 // CHECK11-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
20715 // CHECK11-NEXT:    store i8* null, i8** [[TMP155]], align 4
20716 // CHECK11-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
20717 // CHECK11-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
20718 // CHECK11-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 4
20719 // CHECK11-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
20720 // CHECK11-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
20721 // CHECK11-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 4
20722 // CHECK11-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
20723 // CHECK11-NEXT:    store i8* null, i8** [[TMP160]], align 4
20724 // CHECK11-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
20725 // CHECK11-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
20726 // CHECK11-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 4
20727 // CHECK11-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
20728 // CHECK11-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
20729 // CHECK11-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 4
20730 // CHECK11-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
20731 // CHECK11-NEXT:    store i8* null, i8** [[TMP165]], align 4
20732 // CHECK11-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
20733 // CHECK11-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
20734 // CHECK11-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 4
20735 // CHECK11-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
20736 // CHECK11-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
20737 // CHECK11-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 4
20738 // CHECK11-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
20739 // CHECK11-NEXT:    store i8* null, i8** [[TMP170]], align 4
20740 // CHECK11-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
20741 // CHECK11-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
20742 // CHECK11-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
20743 // CHECK11-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
20744 // CHECK11-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
20745 // CHECK11-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
20746 // CHECK11-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
20747 // CHECK11-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
20748 // CHECK11-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
20749 // CHECK11-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
20750 // CHECK11-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
20751 // CHECK11-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
20752 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
20753 // CHECK11-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20754 // CHECK11-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
20755 // CHECK11-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
20756 // CHECK11:       omp_offload.failed54:
20757 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i32 [[TMP140]], i32 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
20758 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
20759 // CHECK11:       omp_offload.cont55:
20760 // CHECK11-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
20761 // CHECK11-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
20762 // CHECK11-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
20763 // CHECK11-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 4
20764 // CHECK11-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 4
20765 // CHECK11-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 4
20766 // CHECK11-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
20767 // CHECK11-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
20768 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
20769 // CHECK11-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
20770 // CHECK11-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
20771 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
20772 // CHECK11-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
20773 // CHECK11-NEXT:    store i8* null, i8** [[TMP188]], align 4
20774 // CHECK11-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
20775 // CHECK11-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
20776 // CHECK11-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 4
20777 // CHECK11-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
20778 // CHECK11-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
20779 // CHECK11-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 4
20780 // CHECK11-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
20781 // CHECK11-NEXT:    store i8* null, i8** [[TMP193]], align 4
20782 // CHECK11-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
20783 // CHECK11-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
20784 // CHECK11-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 4
20785 // CHECK11-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
20786 // CHECK11-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
20787 // CHECK11-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 4
20788 // CHECK11-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
20789 // CHECK11-NEXT:    store i8* null, i8** [[TMP198]], align 4
20790 // CHECK11-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
20791 // CHECK11-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
20792 // CHECK11-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 4
20793 // CHECK11-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
20794 // CHECK11-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
20795 // CHECK11-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 4
20796 // CHECK11-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
20797 // CHECK11-NEXT:    store i8* null, i8** [[TMP203]], align 4
20798 // CHECK11-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
20799 // CHECK11-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
20800 // CHECK11-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
20801 // CHECK11-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
20802 // CHECK11-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
20803 // CHECK11-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
20804 // CHECK11-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
20805 // CHECK11-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
20806 // CHECK11-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
20807 // CHECK11-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
20808 // CHECK11-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
20809 // CHECK11-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
20810 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
20811 // CHECK11-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20812 // CHECK11-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
20813 // CHECK11-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
20814 // CHECK11:       omp_offload.failed67:
20815 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i32 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
20816 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
20817 // CHECK11:       omp_offload.cont68:
20818 // CHECK11-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
20819 // CHECK11-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
20820 // CHECK11-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
20821 // CHECK11-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
20822 // CHECK11-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
20823 // CHECK11-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
20824 // CHECK11-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 4
20825 // CHECK11-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 4
20826 // CHECK11-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 4
20827 // CHECK11-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
20828 // CHECK11-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
20829 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
20830 // CHECK11-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
20831 // CHECK11-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
20832 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
20833 // CHECK11-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
20834 // CHECK11-NEXT:    store i8* null, i8** [[TMP223]], align 4
20835 // CHECK11-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
20836 // CHECK11-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
20837 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
20838 // CHECK11-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
20839 // CHECK11-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
20840 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
20841 // CHECK11-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
20842 // CHECK11-NEXT:    store i8* null, i8** [[TMP228]], align 4
20843 // CHECK11-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
20844 // CHECK11-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
20845 // CHECK11-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 4
20846 // CHECK11-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
20847 // CHECK11-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
20848 // CHECK11-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 4
20849 // CHECK11-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
20850 // CHECK11-NEXT:    store i8* null, i8** [[TMP233]], align 4
20851 // CHECK11-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
20852 // CHECK11-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
20853 // CHECK11-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 4
20854 // CHECK11-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
20855 // CHECK11-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
20856 // CHECK11-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 4
20857 // CHECK11-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
20858 // CHECK11-NEXT:    store i8* null, i8** [[TMP238]], align 4
20859 // CHECK11-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
20860 // CHECK11-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
20861 // CHECK11-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 4
20862 // CHECK11-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
20863 // CHECK11-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
20864 // CHECK11-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 4
20865 // CHECK11-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
20866 // CHECK11-NEXT:    store i8* null, i8** [[TMP243]], align 4
20867 // CHECK11-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
20868 // CHECK11-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
20869 // CHECK11-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
20870 // CHECK11-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
20871 // CHECK11-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
20872 // CHECK11-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
20873 // CHECK11-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
20874 // CHECK11-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
20875 // CHECK11-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
20876 // CHECK11-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
20877 // CHECK11-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
20878 // CHECK11-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
20879 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
20880 // CHECK11-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20881 // CHECK11-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
20882 // CHECK11-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
20883 // CHECK11:       omp_offload.failed81:
20884 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i32 [[TMP213]], i32 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
20885 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
20886 // CHECK11:       omp_offload.cont82:
20887 // CHECK11-NEXT:    ret i32 0
20888 //
20889 //
20890 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
20891 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
20892 // CHECK11-NEXT:  entry:
20893 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
20894 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
20895 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
20896 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
20897 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
20898 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
20899 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
20900 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
20901 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
20902 // CHECK11-NEXT:    ret void
20903 //
20904 //
20905 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..26
20906 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
20907 // CHECK11-NEXT:  entry:
20908 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20909 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20910 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20911 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
20912 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
20913 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
20914 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20915 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20916 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20917 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20918 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20919 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20920 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20921 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20922 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20923 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
20924 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20925 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20926 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20927 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
20928 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
20929 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
20930 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20931 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
20932 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
20933 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
20934 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
20935 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
20936 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20937 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
20938 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20939 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20940 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20941 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20942 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20943 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
20944 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20945 // CHECK11:       omp.precond.then:
20946 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20947 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20948 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
20949 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20950 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20951 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20952 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
20953 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20954 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20955 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20956 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
20957 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20958 // CHECK11:       cond.true:
20959 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20960 // CHECK11-NEXT:    br label [[COND_END:%.*]]
20961 // CHECK11:       cond.false:
20962 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20963 // CHECK11-NEXT:    br label [[COND_END]]
20964 // CHECK11:       cond.end:
20965 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
20966 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20967 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20968 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
20969 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20970 // CHECK11:       omp.inner.for.cond:
20971 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
20972 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
20973 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
20974 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20975 // CHECK11:       omp.inner.for.body:
20976 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !63
20977 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
20978 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !63
20979 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20980 // CHECK11:       omp.inner.for.inc:
20981 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
20982 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !63
20983 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
20984 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
20985 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]]
20986 // CHECK11:       omp.inner.for.end:
20987 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20988 // CHECK11:       omp.loop.exit:
20989 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20990 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
20991 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
20992 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20993 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
20994 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20995 // CHECK11:       .omp.final.then:
20996 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20997 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
20998 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
20999 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
21000 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
21001 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
21002 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21003 // CHECK11:       .omp.final.done:
21004 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21005 // CHECK11:       omp.precond.end:
21006 // CHECK11-NEXT:    ret void
21007 //
21008 //
21009 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..27
21010 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21011 // CHECK11-NEXT:  entry:
21012 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21013 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21014 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21015 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21016 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21017 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21018 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21019 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21020 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21021 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21022 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21023 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21024 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21025 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21026 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21027 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21028 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21029 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21030 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21031 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21032 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21033 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21034 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21035 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21036 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21037 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21038 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21039 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21040 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21041 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21042 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21043 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21044 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21045 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21046 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21047 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21048 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21049 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21050 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21051 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21052 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21053 // CHECK11:       omp.precond.then:
21054 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21055 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21056 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21057 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21058 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21059 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21060 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21061 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21062 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21063 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21064 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21065 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21066 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21067 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21068 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21069 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21070 // CHECK11:       cond.true:
21071 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21072 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21073 // CHECK11:       cond.false:
21074 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21075 // CHECK11-NEXT:    br label [[COND_END]]
21076 // CHECK11:       cond.end:
21077 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21078 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21079 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21080 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21081 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21082 // CHECK11:       omp.inner.for.cond:
21083 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21084 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !66
21085 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21086 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21087 // CHECK11:       omp.inner.for.body:
21088 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21089 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21090 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21091 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !66
21092 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !66
21093 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
21094 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21095 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !66
21096 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !66
21097 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
21098 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21099 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !66
21100 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21101 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !66
21102 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
21103 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21104 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !66
21105 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21106 // CHECK11:       omp.body.continue:
21107 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21108 // CHECK11:       omp.inner.for.inc:
21109 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21110 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21111 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21112 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP67:![0-9]+]]
21113 // CHECK11:       omp.inner.for.end:
21114 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21115 // CHECK11:       omp.loop.exit:
21116 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21117 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21118 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21119 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21120 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21121 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21122 // CHECK11:       .omp.final.then:
21123 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21124 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21125 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21126 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21127 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21128 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21129 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21130 // CHECK11:       .omp.final.done:
21131 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21132 // CHECK11:       omp.precond.end:
21133 // CHECK11-NEXT:    ret void
21134 //
21135 //
21136 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
21137 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21138 // CHECK11-NEXT:  entry:
21139 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21140 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21141 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21142 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21143 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21144 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21145 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21146 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21147 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21148 // CHECK11-NEXT:    ret void
21149 //
21150 //
21151 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..30
21152 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21153 // CHECK11-NEXT:  entry:
21154 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21155 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21156 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21157 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21158 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21159 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21160 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21161 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21162 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21163 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21164 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21165 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21166 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21167 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21168 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21169 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21170 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21171 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21172 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21173 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21174 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21175 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21176 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21177 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21178 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21179 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21180 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21181 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21182 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21183 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21184 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21185 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21186 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21187 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21188 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21189 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21190 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21191 // CHECK11:       omp.precond.then:
21192 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21193 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21194 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
21195 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21196 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21197 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21198 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
21199 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21200 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21201 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21202 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
21203 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21204 // CHECK11:       cond.true:
21205 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21206 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21207 // CHECK11:       cond.false:
21208 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21209 // CHECK11-NEXT:    br label [[COND_END]]
21210 // CHECK11:       cond.end:
21211 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
21212 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21213 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21214 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
21215 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21216 // CHECK11:       omp.inner.for.cond:
21217 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
21218 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
21219 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
21220 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21221 // CHECK11:       omp.inner.for.body:
21222 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !69
21223 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
21224 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !69
21225 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21226 // CHECK11:       omp.inner.for.inc:
21227 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
21228 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !69
21229 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
21230 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
21231 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP70:![0-9]+]]
21232 // CHECK11:       omp.inner.for.end:
21233 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21234 // CHECK11:       omp.loop.exit:
21235 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21236 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
21237 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
21238 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21239 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
21240 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21241 // CHECK11:       .omp.final.then:
21242 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21243 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
21244 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
21245 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
21246 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
21247 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
21248 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21249 // CHECK11:       .omp.final.done:
21250 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21251 // CHECK11:       omp.precond.end:
21252 // CHECK11-NEXT:    ret void
21253 //
21254 //
21255 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..31
21256 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21257 // CHECK11-NEXT:  entry:
21258 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21259 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21260 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21261 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21262 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21263 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21264 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21265 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21266 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21267 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21268 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21269 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21270 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21271 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21272 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21273 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21274 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21275 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21276 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21277 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21278 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21279 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21280 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21281 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21282 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21283 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21284 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21285 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21286 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21287 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21288 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21289 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21290 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21291 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21292 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21293 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21294 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21295 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21296 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21297 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21298 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21299 // CHECK11:       omp.precond.then:
21300 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21301 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21302 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21303 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21304 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21305 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21306 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21307 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21308 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21309 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21310 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21311 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21312 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21313 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21314 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21315 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21316 // CHECK11:       cond.true:
21317 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21318 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21319 // CHECK11:       cond.false:
21320 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21321 // CHECK11-NEXT:    br label [[COND_END]]
21322 // CHECK11:       cond.end:
21323 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21324 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21325 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21326 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21327 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21328 // CHECK11:       omp.inner.for.cond:
21329 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21330 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !72
21331 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21332 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21333 // CHECK11:       omp.inner.for.body:
21334 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21335 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21336 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21337 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !72
21338 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !72
21339 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
21340 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21341 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !72
21342 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !72
21343 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
21344 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21345 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !72
21346 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21347 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !72
21348 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
21349 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21350 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !72
21351 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21352 // CHECK11:       omp.body.continue:
21353 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21354 // CHECK11:       omp.inner.for.inc:
21355 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21356 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21357 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21358 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP73:![0-9]+]]
21359 // CHECK11:       omp.inner.for.end:
21360 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21361 // CHECK11:       omp.loop.exit:
21362 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21363 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21364 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21365 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21366 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21367 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21368 // CHECK11:       .omp.final.then:
21369 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21370 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21371 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21372 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21373 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21374 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21375 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21376 // CHECK11:       .omp.final.done:
21377 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21378 // CHECK11:       omp.precond.end:
21379 // CHECK11-NEXT:    ret void
21380 //
21381 //
21382 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
21383 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21384 // CHECK11-NEXT:  entry:
21385 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
21386 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21387 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21388 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21389 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21390 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
21391 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21392 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21393 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21394 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21395 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21396 // CHECK11-NEXT:    ret void
21397 //
21398 //
21399 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..34
21400 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21401 // CHECK11-NEXT:  entry:
21402 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21403 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21404 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
21405 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21406 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21407 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21408 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21409 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21410 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21411 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21412 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21413 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21414 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21415 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21416 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21417 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21418 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21419 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21420 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21421 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
21422 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21423 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21424 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21425 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21426 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
21427 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21428 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21429 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21430 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21431 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
21432 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
21433 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21434 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
21435 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21436 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21437 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21438 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21439 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21440 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
21441 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21442 // CHECK11:       omp.precond.then:
21443 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21444 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21445 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
21446 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21447 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21448 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
21449 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21450 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21451 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
21452 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21453 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21454 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21455 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21456 // CHECK11:       cond.true:
21457 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21458 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21459 // CHECK11:       cond.false:
21460 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21461 // CHECK11-NEXT:    br label [[COND_END]]
21462 // CHECK11:       cond.end:
21463 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21464 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21465 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21466 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21467 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21468 // CHECK11:       omp.inner.for.cond:
21469 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21470 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
21471 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
21472 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
21473 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21474 // CHECK11:       omp.inner.for.body:
21475 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21476 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21477 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !75
21478 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21479 // CHECK11:       omp.inner.for.inc:
21480 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21481 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
21482 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
21483 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21484 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21485 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
21486 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
21487 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21488 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21489 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
21490 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
21491 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21492 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21493 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
21494 // CHECK11-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
21495 // CHECK11-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
21496 // CHECK11:       cond.true10:
21497 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
21498 // CHECK11-NEXT:    br label [[COND_END12:%.*]]
21499 // CHECK11:       cond.false11:
21500 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21501 // CHECK11-NEXT:    br label [[COND_END12]]
21502 // CHECK11:       cond.end12:
21503 // CHECK11-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
21504 // CHECK11-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21505 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21506 // CHECK11-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21507 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP76:![0-9]+]]
21508 // CHECK11:       omp.inner.for.end:
21509 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21510 // CHECK11:       omp.loop.exit:
21511 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21512 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
21513 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
21514 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21515 // CHECK11-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
21516 // CHECK11-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21517 // CHECK11:       .omp.final.then:
21518 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21519 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
21520 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
21521 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
21522 // CHECK11-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
21523 // CHECK11-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
21524 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21525 // CHECK11:       .omp.final.done:
21526 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21527 // CHECK11:       omp.precond.end:
21528 // CHECK11-NEXT:    ret void
21529 //
21530 //
21531 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..35
21532 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21533 // CHECK11-NEXT:  entry:
21534 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21535 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21536 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21537 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21538 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21539 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21540 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21541 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21542 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21543 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21544 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21545 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21546 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21547 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21548 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21549 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21550 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21551 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21552 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21553 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21554 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21555 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21556 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21557 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21558 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21559 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21560 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21561 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21562 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21563 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21564 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21565 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21566 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21567 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21568 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21569 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21570 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21571 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21572 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21573 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21574 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21575 // CHECK11:       omp.precond.then:
21576 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21577 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21578 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21579 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21580 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21581 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21582 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21583 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21584 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21585 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21586 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21587 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21588 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21589 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21590 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21591 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21592 // CHECK11:       cond.true:
21593 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21594 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21595 // CHECK11:       cond.false:
21596 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21597 // CHECK11-NEXT:    br label [[COND_END]]
21598 // CHECK11:       cond.end:
21599 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21600 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21601 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21602 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21603 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21604 // CHECK11:       omp.inner.for.cond:
21605 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21606 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !78
21607 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21608 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21609 // CHECK11:       omp.inner.for.body:
21610 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21611 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21612 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21613 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !78
21614 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !78
21615 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
21616 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21617 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !78
21618 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !78
21619 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
21620 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21621 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !78
21622 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21623 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !78
21624 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
21625 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21626 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !78
21627 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21628 // CHECK11:       omp.body.continue:
21629 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21630 // CHECK11:       omp.inner.for.inc:
21631 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21632 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21633 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21634 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP79:![0-9]+]]
21635 // CHECK11:       omp.inner.for.end:
21636 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21637 // CHECK11:       omp.loop.exit:
21638 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21639 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21640 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21641 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21642 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21643 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21644 // CHECK11:       .omp.final.then:
21645 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21646 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21647 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21648 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21649 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21650 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21651 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21652 // CHECK11:       .omp.final.done:
21653 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21654 // CHECK11:       omp.precond.end:
21655 // CHECK11-NEXT:    ret void
21656 //
21657 //
21658 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
21659 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21660 // CHECK11-NEXT:  entry:
21661 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21662 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21663 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21664 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21665 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21666 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21667 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21668 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21669 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21670 // CHECK11-NEXT:    ret void
21671 //
21672 //
21673 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..38
21674 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21675 // CHECK11-NEXT:  entry:
21676 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21677 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21678 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21679 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21680 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21681 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21682 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21683 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21684 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21685 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21686 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21687 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21688 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21689 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21690 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21691 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21692 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21693 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21694 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21695 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21696 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21697 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21698 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21699 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21700 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21701 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21702 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21703 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21704 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21705 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21706 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21707 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21708 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21709 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21710 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21711 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21712 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21713 // CHECK11:       omp.precond.then:
21714 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21715 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21716 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
21717 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21718 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21719 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21720 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
21721 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21722 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21723 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21724 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
21725 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21726 // CHECK11:       cond.true:
21727 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21728 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21729 // CHECK11:       cond.false:
21730 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21731 // CHECK11-NEXT:    br label [[COND_END]]
21732 // CHECK11:       cond.end:
21733 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
21734 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21735 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21736 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
21737 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21738 // CHECK11:       omp.inner.for.cond:
21739 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
21740 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
21741 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
21742 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21743 // CHECK11:       omp.inner.for.body:
21744 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !81
21745 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
21746 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !81
21747 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21748 // CHECK11:       omp.inner.for.inc:
21749 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
21750 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !81
21751 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
21752 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
21753 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP82:![0-9]+]]
21754 // CHECK11:       omp.inner.for.end:
21755 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21756 // CHECK11:       omp.loop.exit:
21757 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21758 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
21759 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
21760 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21761 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
21762 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21763 // CHECK11:       .omp.final.then:
21764 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21765 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
21766 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
21767 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
21768 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
21769 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
21770 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21771 // CHECK11:       .omp.final.done:
21772 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21773 // CHECK11:       omp.precond.end:
21774 // CHECK11-NEXT:    ret void
21775 //
21776 //
21777 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..39
21778 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21779 // CHECK11-NEXT:  entry:
21780 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21781 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21782 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21783 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21784 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21785 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21786 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21787 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21788 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21789 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21790 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21791 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21792 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21793 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21794 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21795 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21796 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21797 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21798 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21799 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21800 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21801 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21802 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21803 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21804 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21805 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21806 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21807 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21808 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21809 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21810 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21811 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21812 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21813 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21814 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21815 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21816 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21817 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21818 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21819 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21820 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21821 // CHECK11:       omp.precond.then:
21822 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21823 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21824 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21825 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21826 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21827 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21828 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21829 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21830 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21831 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21832 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21833 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21834 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21835 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21836 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21837 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21838 // CHECK11:       cond.true:
21839 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21840 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21841 // CHECK11:       cond.false:
21842 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21843 // CHECK11-NEXT:    br label [[COND_END]]
21844 // CHECK11:       cond.end:
21845 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21846 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21847 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21848 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21849 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21850 // CHECK11:       omp.inner.for.cond:
21851 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21852 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !84
21853 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21854 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21855 // CHECK11:       omp.inner.for.body:
21856 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21857 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21858 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21859 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !84
21860 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !84
21861 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
21862 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21863 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !84
21864 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !84
21865 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
21866 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21867 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !84
21868 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21869 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !84
21870 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
21871 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21872 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !84
21873 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21874 // CHECK11:       omp.body.continue:
21875 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21876 // CHECK11:       omp.inner.for.inc:
21877 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21878 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21879 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21880 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP85:![0-9]+]]
21881 // CHECK11:       omp.inner.for.end:
21882 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21883 // CHECK11:       omp.loop.exit:
21884 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21885 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21886 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21887 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21888 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21889 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21890 // CHECK11:       .omp.final.then:
21891 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21892 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21893 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21894 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21895 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21896 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21897 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21898 // CHECK11:       .omp.final.done:
21899 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21900 // CHECK11:       omp.precond.end:
21901 // CHECK11-NEXT:    ret void
21902 //
21903 //
21904 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
21905 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21906 // CHECK11-NEXT:  entry:
21907 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
21908 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21909 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21910 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21911 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21912 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
21913 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21914 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21915 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21916 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21917 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21918 // CHECK11-NEXT:    ret void
21919 //
21920 //
21921 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..42
21922 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21923 // CHECK11-NEXT:  entry:
21924 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21925 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21926 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
21927 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21928 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21929 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21930 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21931 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21932 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21933 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21934 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21935 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
21936 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21937 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21938 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21939 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21940 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21941 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
21942 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
21943 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21944 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21945 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
21946 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21947 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21948 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21949 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21950 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
21951 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21952 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21953 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21954 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21955 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
21956 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
21957 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
21958 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21959 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21960 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
21961 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21962 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
21963 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
21964 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21965 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21966 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
21967 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21968 // CHECK11:       omp.precond.then:
21969 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21970 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21971 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
21972 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21973 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21974 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21975 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21976 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21977 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21978 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21979 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21980 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21981 // CHECK11:       cond.true:
21982 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21983 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21984 // CHECK11:       cond.false:
21985 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21986 // CHECK11-NEXT:    br label [[COND_END]]
21987 // CHECK11:       cond.end:
21988 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21989 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21990 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21991 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21992 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21993 // CHECK11:       omp.inner.for.cond:
21994 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
21995 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
21996 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21997 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21998 // CHECK11:       omp.inner.for.body:
21999 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !87
22000 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
22001 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !87
22002 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
22003 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
22004 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !87
22005 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22006 // CHECK11:       omp.inner.for.inc:
22007 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
22008 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !87
22009 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
22010 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
22011 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP88:![0-9]+]]
22012 // CHECK11:       omp.inner.for.end:
22013 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22014 // CHECK11:       omp.loop.exit:
22015 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22016 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
22017 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
22018 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22019 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
22020 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22021 // CHECK11:       .omp.final.then:
22022 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22023 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
22024 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
22025 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
22026 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
22027 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
22028 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22029 // CHECK11:       .omp.final.done:
22030 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22031 // CHECK11:       omp.precond.end:
22032 // CHECK11-NEXT:    ret void
22033 //
22034 //
22035 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..43
22036 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
22037 // CHECK11-NEXT:  entry:
22038 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22039 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22040 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22041 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22042 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22043 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22044 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22045 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22046 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22047 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22048 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22049 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22050 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22051 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22052 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22053 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22054 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22055 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22056 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
22057 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22058 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22059 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22060 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22061 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22062 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22063 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22064 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22065 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22066 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22067 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22068 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22069 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22070 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22071 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22072 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22073 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22074 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22075 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
22076 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22077 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22078 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22079 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22080 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22081 // CHECK11:       omp.precond.then:
22082 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22083 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22084 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
22085 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22086 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22087 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
22088 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
22089 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22090 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22091 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22092 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22093 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
22094 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
22095 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22096 // CHECK11:       omp.dispatch.cond:
22097 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22098 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22099 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
22100 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22101 // CHECK11:       cond.true:
22102 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22103 // CHECK11-NEXT:    br label [[COND_END:%.*]]
22104 // CHECK11:       cond.false:
22105 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22106 // CHECK11-NEXT:    br label [[COND_END]]
22107 // CHECK11:       cond.end:
22108 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
22109 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22110 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22111 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
22112 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22113 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22114 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
22115 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22116 // CHECK11:       omp.dispatch.body:
22117 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22118 // CHECK11:       omp.inner.for.cond:
22119 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22120 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !90
22121 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
22122 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22123 // CHECK11:       omp.inner.for.body:
22124 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22125 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
22126 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22127 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !90
22128 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !90
22129 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
22130 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
22131 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !90
22132 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !90
22133 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
22134 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
22135 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !90
22136 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
22137 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !90
22138 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
22139 // CHECK11-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
22140 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !90
22141 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22142 // CHECK11:       omp.body.continue:
22143 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22144 // CHECK11:       omp.inner.for.inc:
22145 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22146 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
22147 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22148 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP91:![0-9]+]]
22149 // CHECK11:       omp.inner.for.end:
22150 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22151 // CHECK11:       omp.dispatch.inc:
22152 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22153 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22154 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
22155 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
22156 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22157 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22158 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
22159 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
22160 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
22161 // CHECK11:       omp.dispatch.end:
22162 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22163 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
22164 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
22165 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22166 // CHECK11-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
22167 // CHECK11-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22168 // CHECK11:       .omp.final.then:
22169 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22170 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
22171 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
22172 // CHECK11-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
22173 // CHECK11-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
22174 // CHECK11-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
22175 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22176 // CHECK11:       .omp.final.done:
22177 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22178 // CHECK11:       omp.precond.end:
22179 // CHECK11-NEXT:    ret void
22180 //
22181 //
22182 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
22183 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
22184 // CHECK11-NEXT:  entry:
22185 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22186 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
22187 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
22188 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
22189 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22190 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
22191 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
22192 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
22193 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
22194 // CHECK11-NEXT:    ret void
22195 //
22196 //
22197 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..46
22198 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
22199 // CHECK11-NEXT:  entry:
22200 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22201 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22202 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22203 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22204 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22205 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22206 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22207 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22208 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22209 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22210 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22211 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22212 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22213 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22214 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22215 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
22216 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22217 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22218 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22219 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22220 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22221 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22222 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22223 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22224 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22225 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22226 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22227 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
22228 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22229 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22230 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22231 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22232 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22233 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22234 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22235 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22236 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22237 // CHECK11:       omp.precond.then:
22238 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22239 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22240 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
22241 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22242 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22243 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22244 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
22245 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22246 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22247 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22248 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
22249 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22250 // CHECK11:       cond.true:
22251 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22252 // CHECK11-NEXT:    br label [[COND_END:%.*]]
22253 // CHECK11:       cond.false:
22254 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22255 // CHECK11-NEXT:    br label [[COND_END]]
22256 // CHECK11:       cond.end:
22257 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
22258 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22259 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22260 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
22261 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22262 // CHECK11:       omp.inner.for.cond:
22263 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
22264 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
22265 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
22266 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22267 // CHECK11:       omp.inner.for.body:
22268 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !93
22269 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
22270 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !93
22271 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22272 // CHECK11:       omp.inner.for.inc:
22273 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
22274 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !93
22275 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
22276 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
22277 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP94:![0-9]+]]
22278 // CHECK11:       omp.inner.for.end:
22279 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22280 // CHECK11:       omp.loop.exit:
22281 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22282 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
22283 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
22284 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22285 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
22286 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22287 // CHECK11:       .omp.final.then:
22288 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22289 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
22290 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
22291 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
22292 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
22293 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
22294 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22295 // CHECK11:       .omp.final.done:
22296 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22297 // CHECK11:       omp.precond.end:
22298 // CHECK11-NEXT:    ret void
22299 //
22300 //
22301 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..47
22302 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
22303 // CHECK11-NEXT:  entry:
22304 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22305 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22306 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22307 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22308 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22309 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22310 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22311 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22312 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22313 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22314 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22315 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22316 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22317 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22318 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22319 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22320 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22321 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
22322 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22323 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22324 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22325 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22326 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22327 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22328 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22329 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22330 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22331 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22332 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22333 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22334 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22335 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
22336 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22337 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22338 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22339 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22340 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22341 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22342 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22343 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22344 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22345 // CHECK11:       omp.precond.then:
22346 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22347 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22348 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
22349 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22350 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22351 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
22352 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
22353 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22354 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22355 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22356 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22357 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22358 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
22359 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
22360 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22361 // CHECK11:       omp.dispatch.cond:
22362 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22363 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
22364 // CHECK11-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
22365 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
22366 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22367 // CHECK11:       omp.dispatch.body:
22368 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22369 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
22370 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22371 // CHECK11:       omp.inner.for.cond:
22372 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22373 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !96
22374 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
22375 // CHECK11-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22376 // CHECK11:       omp.inner.for.body:
22377 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22378 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
22379 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22380 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !96
22381 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !96
22382 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
22383 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 [[TMP22]]
22384 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !96
22385 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !96
22386 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
22387 // CHECK11-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i32 [[TMP25]]
22388 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !96
22389 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
22390 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !96
22391 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
22392 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i32 [[TMP28]]
22393 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !96
22394 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22395 // CHECK11:       omp.body.continue:
22396 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22397 // CHECK11:       omp.inner.for.inc:
22398 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22399 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
22400 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22401 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP97:![0-9]+]]
22402 // CHECK11:       omp.inner.for.end:
22403 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22404 // CHECK11:       omp.dispatch.inc:
22405 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
22406 // CHECK11:       omp.dispatch.end:
22407 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22408 // CHECK11-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
22409 // CHECK11-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22410 // CHECK11:       .omp.final.then:
22411 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22412 // CHECK11-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
22413 // CHECK11-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
22414 // CHECK11-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
22415 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
22416 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
22417 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22418 // CHECK11:       .omp.final.done:
22419 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22420 // CHECK11:       omp.precond.end:
22421 // CHECK11-NEXT:    ret void
22422 //
22423 //
22424 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
22425 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
22426 // CHECK11-NEXT:  entry:
22427 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
22428 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22429 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
22430 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
22431 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
22432 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
22433 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22434 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
22435 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
22436 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
22437 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
22438 // CHECK11-NEXT:    ret void
22439 //
22440 //
22441 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..50
22442 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
22443 // CHECK11-NEXT:  entry:
22444 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22445 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22446 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
22447 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22448 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22449 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22450 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22451 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22452 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22453 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22454 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22455 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22456 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22457 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22458 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22459 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22460 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22461 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
22462 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
22463 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22464 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22465 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
22466 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22467 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22468 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22469 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22470 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
22471 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22472 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22473 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22474 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22475 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
22476 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
22477 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
22478 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22479 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22480 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
22481 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22482 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
22483 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22484 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22485 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22486 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
22487 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22488 // CHECK11:       omp.precond.then:
22489 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22490 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22491 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
22492 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22493 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22494 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22495 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
22496 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22497 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22498 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22499 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
22500 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22501 // CHECK11:       cond.true:
22502 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22503 // CHECK11-NEXT:    br label [[COND_END:%.*]]
22504 // CHECK11:       cond.false:
22505 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22506 // CHECK11-NEXT:    br label [[COND_END]]
22507 // CHECK11:       cond.end:
22508 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
22509 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22510 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22511 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
22512 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22513 // CHECK11:       omp.inner.for.cond:
22514 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
22515 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
22516 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
22517 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22518 // CHECK11:       omp.inner.for.body:
22519 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !99
22520 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
22521 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !99
22522 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
22523 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
22524 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !99
22525 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22526 // CHECK11:       omp.inner.for.inc:
22527 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
22528 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !99
22529 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
22530 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
22531 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP100:![0-9]+]]
22532 // CHECK11:       omp.inner.for.end:
22533 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22534 // CHECK11:       omp.loop.exit:
22535 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22536 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
22537 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
22538 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22539 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
22540 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22541 // CHECK11:       .omp.final.then:
22542 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22543 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
22544 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
22545 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
22546 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
22547 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
22548 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22549 // CHECK11:       .omp.final.done:
22550 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22551 // CHECK11:       omp.precond.end:
22552 // CHECK11-NEXT:    ret void
22553 //
22554 //
22555 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..51
22556 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
22557 // CHECK11-NEXT:  entry:
22558 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22559 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22560 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22561 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22562 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22563 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22564 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22565 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22566 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22567 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22568 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22569 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22570 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22571 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22572 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22573 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22574 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22575 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22576 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
22577 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22578 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22579 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22580 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22581 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22582 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22583 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22584 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22585 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22586 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22587 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22588 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22589 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22590 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22591 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22592 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22593 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22594 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22595 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
22596 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22597 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22598 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22599 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22600 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22601 // CHECK11:       omp.precond.then:
22602 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22603 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22604 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
22605 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22606 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22607 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
22608 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
22609 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22610 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22611 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22612 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22613 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22614 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22615 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
22616 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
22617 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22618 // CHECK11:       omp.dispatch.cond:
22619 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22620 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
22621 // CHECK11-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
22622 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
22623 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22624 // CHECK11:       omp.dispatch.body:
22625 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22626 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
22627 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22628 // CHECK11:       omp.inner.for.cond:
22629 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22630 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !102
22631 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
22632 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22633 // CHECK11:       omp.inner.for.body:
22634 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22635 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
22636 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22637 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !102
22638 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !102
22639 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
22640 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i32 [[TMP23]]
22641 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !102
22642 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !102
22643 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
22644 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i32 [[TMP26]]
22645 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !102
22646 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
22647 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !102
22648 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
22649 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i32 [[TMP29]]
22650 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !102
22651 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22652 // CHECK11:       omp.body.continue:
22653 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22654 // CHECK11:       omp.inner.for.inc:
22655 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22656 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
22657 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22658 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP103:![0-9]+]]
22659 // CHECK11:       omp.inner.for.end:
22660 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22661 // CHECK11:       omp.dispatch.inc:
22662 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
22663 // CHECK11:       omp.dispatch.end:
22664 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22665 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
22666 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22667 // CHECK11:       .omp.final.then:
22668 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22669 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
22670 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
22671 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
22672 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
22673 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
22674 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22675 // CHECK11:       .omp.final.done:
22676 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22677 // CHECK11:       omp.precond.end:
22678 // CHECK11-NEXT:    ret void
22679 //
22680 //
22681 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
22682 // CHECK11-SAME: () #[[ATTR4:[0-9]+]] {
22683 // CHECK11-NEXT:  entry:
22684 // CHECK11-NEXT:    call void @__tgt_register_requires(i64 1)
22685 // CHECK11-NEXT:    ret void
22686 //
22687 //
22688 // CHECK12-LABEL: define {{[^@]+}}@main
22689 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
22690 // CHECK12-NEXT:  entry:
22691 // CHECK12-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
22692 // CHECK12-NEXT:    [[A:%.*]] = alloca double*, align 4
22693 // CHECK12-NEXT:    [[B:%.*]] = alloca double*, align 4
22694 // CHECK12-NEXT:    [[C:%.*]] = alloca double*, align 4
22695 // CHECK12-NEXT:    [[N:%.*]] = alloca i32, align 4
22696 // CHECK12-NEXT:    [[CH:%.*]] = alloca i32, align 4
22697 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
22698 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
22699 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
22700 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
22701 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22702 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22703 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22704 // CHECK12-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
22705 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
22706 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
22707 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
22708 // CHECK12-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
22709 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
22710 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
22711 // CHECK12-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
22712 // CHECK12-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
22713 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
22714 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
22715 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
22716 // CHECK12-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
22717 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
22718 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
22719 // CHECK12-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
22720 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
22721 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
22722 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
22723 // CHECK12-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
22724 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
22725 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
22726 // CHECK12-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
22727 // CHECK12-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
22728 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
22729 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
22730 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
22731 // CHECK12-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
22732 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
22733 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
22734 // CHECK12-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
22735 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
22736 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
22737 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
22738 // CHECK12-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
22739 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
22740 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
22741 // CHECK12-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
22742 // CHECK12-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
22743 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
22744 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
22745 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
22746 // CHECK12-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
22747 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
22748 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
22749 // CHECK12-NEXT:    store i32 0, i32* [[RETVAL]], align 4
22750 // CHECK12-NEXT:    store i32 10000, i32* [[N]], align 4
22751 // CHECK12-NEXT:    store i32 100, i32* [[CH]], align 4
22752 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
22753 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
22754 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
22755 // CHECK12-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 4
22756 // CHECK12-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 4
22757 // CHECK12-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 4
22758 // CHECK12-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
22759 // CHECK12-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
22760 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
22761 // CHECK12-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
22762 // CHECK12-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
22763 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
22764 // CHECK12-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
22765 // CHECK12-NEXT:    store i8* null, i8** [[TMP9]], align 4
22766 // CHECK12-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
22767 // CHECK12-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
22768 // CHECK12-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 4
22769 // CHECK12-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
22770 // CHECK12-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
22771 // CHECK12-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 4
22772 // CHECK12-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
22773 // CHECK12-NEXT:    store i8* null, i8** [[TMP14]], align 4
22774 // CHECK12-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
22775 // CHECK12-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
22776 // CHECK12-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 4
22777 // CHECK12-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
22778 // CHECK12-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
22779 // CHECK12-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 4
22780 // CHECK12-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
22781 // CHECK12-NEXT:    store i8* null, i8** [[TMP19]], align 4
22782 // CHECK12-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
22783 // CHECK12-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
22784 // CHECK12-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 4
22785 // CHECK12-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
22786 // CHECK12-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
22787 // CHECK12-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 4
22788 // CHECK12-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
22789 // CHECK12-NEXT:    store i8* null, i8** [[TMP24]], align 4
22790 // CHECK12-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
22791 // CHECK12-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
22792 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
22793 // CHECK12-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
22794 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22795 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
22796 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22797 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22798 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22799 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22800 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
22801 // CHECK12-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
22802 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
22803 // CHECK12-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22804 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
22805 // CHECK12-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
22806 // CHECK12:       omp_offload.failed:
22807 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i32 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
22808 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT]]
22809 // CHECK12:       omp_offload.cont:
22810 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
22811 // CHECK12-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
22812 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
22813 // CHECK12-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 4
22814 // CHECK12-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 4
22815 // CHECK12-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 4
22816 // CHECK12-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
22817 // CHECK12-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
22818 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
22819 // CHECK12-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
22820 // CHECK12-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
22821 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
22822 // CHECK12-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
22823 // CHECK12-NEXT:    store i8* null, i8** [[TMP42]], align 4
22824 // CHECK12-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
22825 // CHECK12-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
22826 // CHECK12-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 4
22827 // CHECK12-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
22828 // CHECK12-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
22829 // CHECK12-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 4
22830 // CHECK12-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
22831 // CHECK12-NEXT:    store i8* null, i8** [[TMP47]], align 4
22832 // CHECK12-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
22833 // CHECK12-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
22834 // CHECK12-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 4
22835 // CHECK12-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
22836 // CHECK12-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
22837 // CHECK12-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 4
22838 // CHECK12-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
22839 // CHECK12-NEXT:    store i8* null, i8** [[TMP52]], align 4
22840 // CHECK12-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
22841 // CHECK12-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
22842 // CHECK12-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 4
22843 // CHECK12-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
22844 // CHECK12-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
22845 // CHECK12-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 4
22846 // CHECK12-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
22847 // CHECK12-NEXT:    store i8* null, i8** [[TMP57]], align 4
22848 // CHECK12-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
22849 // CHECK12-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
22850 // CHECK12-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
22851 // CHECK12-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
22852 // CHECK12-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
22853 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
22854 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
22855 // CHECK12-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
22856 // CHECK12-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
22857 // CHECK12-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
22858 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
22859 // CHECK12-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
22860 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
22861 // CHECK12-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22862 // CHECK12-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
22863 // CHECK12-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
22864 // CHECK12:       omp_offload.failed14:
22865 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i32 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
22866 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
22867 // CHECK12:       omp_offload.cont15:
22868 // CHECK12-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
22869 // CHECK12-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
22870 // CHECK12-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
22871 // CHECK12-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
22872 // CHECK12-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
22873 // CHECK12-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
22874 // CHECK12-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 4
22875 // CHECK12-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 4
22876 // CHECK12-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 4
22877 // CHECK12-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
22878 // CHECK12-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
22879 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
22880 // CHECK12-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
22881 // CHECK12-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
22882 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
22883 // CHECK12-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
22884 // CHECK12-NEXT:    store i8* null, i8** [[TMP77]], align 4
22885 // CHECK12-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
22886 // CHECK12-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
22887 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
22888 // CHECK12-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
22889 // CHECK12-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
22890 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
22891 // CHECK12-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
22892 // CHECK12-NEXT:    store i8* null, i8** [[TMP82]], align 4
22893 // CHECK12-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
22894 // CHECK12-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
22895 // CHECK12-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 4
22896 // CHECK12-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
22897 // CHECK12-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
22898 // CHECK12-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 4
22899 // CHECK12-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
22900 // CHECK12-NEXT:    store i8* null, i8** [[TMP87]], align 4
22901 // CHECK12-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
22902 // CHECK12-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
22903 // CHECK12-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 4
22904 // CHECK12-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
22905 // CHECK12-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
22906 // CHECK12-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 4
22907 // CHECK12-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
22908 // CHECK12-NEXT:    store i8* null, i8** [[TMP92]], align 4
22909 // CHECK12-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
22910 // CHECK12-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
22911 // CHECK12-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 4
22912 // CHECK12-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
22913 // CHECK12-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
22914 // CHECK12-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 4
22915 // CHECK12-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
22916 // CHECK12-NEXT:    store i8* null, i8** [[TMP97]], align 4
22917 // CHECK12-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
22918 // CHECK12-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
22919 // CHECK12-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
22920 // CHECK12-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
22921 // CHECK12-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
22922 // CHECK12-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
22923 // CHECK12-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
22924 // CHECK12-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
22925 // CHECK12-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
22926 // CHECK12-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
22927 // CHECK12-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
22928 // CHECK12-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
22929 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
22930 // CHECK12-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22931 // CHECK12-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
22932 // CHECK12-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
22933 // CHECK12:       omp_offload.failed27:
22934 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i32 [[TMP67]], i32 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
22935 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
22936 // CHECK12:       omp_offload.cont28:
22937 // CHECK12-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
22938 // CHECK12-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
22939 // CHECK12-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
22940 // CHECK12-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 4
22941 // CHECK12-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 4
22942 // CHECK12-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 4
22943 // CHECK12-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
22944 // CHECK12-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
22945 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
22946 // CHECK12-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
22947 // CHECK12-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
22948 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
22949 // CHECK12-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
22950 // CHECK12-NEXT:    store i8* null, i8** [[TMP115]], align 4
22951 // CHECK12-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
22952 // CHECK12-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
22953 // CHECK12-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 4
22954 // CHECK12-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
22955 // CHECK12-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
22956 // CHECK12-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 4
22957 // CHECK12-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
22958 // CHECK12-NEXT:    store i8* null, i8** [[TMP120]], align 4
22959 // CHECK12-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
22960 // CHECK12-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
22961 // CHECK12-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 4
22962 // CHECK12-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
22963 // CHECK12-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
22964 // CHECK12-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 4
22965 // CHECK12-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
22966 // CHECK12-NEXT:    store i8* null, i8** [[TMP125]], align 4
22967 // CHECK12-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
22968 // CHECK12-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
22969 // CHECK12-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 4
22970 // CHECK12-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
22971 // CHECK12-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
22972 // CHECK12-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 4
22973 // CHECK12-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
22974 // CHECK12-NEXT:    store i8* null, i8** [[TMP130]], align 4
22975 // CHECK12-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
22976 // CHECK12-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
22977 // CHECK12-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
22978 // CHECK12-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
22979 // CHECK12-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
22980 // CHECK12-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
22981 // CHECK12-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
22982 // CHECK12-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
22983 // CHECK12-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
22984 // CHECK12-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
22985 // CHECK12-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
22986 // CHECK12-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
22987 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
22988 // CHECK12-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22989 // CHECK12-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
22990 // CHECK12-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
22991 // CHECK12:       omp_offload.failed40:
22992 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i32 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
22993 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
22994 // CHECK12:       omp_offload.cont41:
22995 // CHECK12-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
22996 // CHECK12-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
22997 // CHECK12-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
22998 // CHECK12-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
22999 // CHECK12-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
23000 // CHECK12-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
23001 // CHECK12-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 4
23002 // CHECK12-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 4
23003 // CHECK12-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 4
23004 // CHECK12-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
23005 // CHECK12-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
23006 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
23007 // CHECK12-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
23008 // CHECK12-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
23009 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
23010 // CHECK12-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
23011 // CHECK12-NEXT:    store i8* null, i8** [[TMP150]], align 4
23012 // CHECK12-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
23013 // CHECK12-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
23014 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
23015 // CHECK12-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
23016 // CHECK12-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
23017 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
23018 // CHECK12-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
23019 // CHECK12-NEXT:    store i8* null, i8** [[TMP155]], align 4
23020 // CHECK12-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
23021 // CHECK12-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
23022 // CHECK12-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 4
23023 // CHECK12-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
23024 // CHECK12-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
23025 // CHECK12-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 4
23026 // CHECK12-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
23027 // CHECK12-NEXT:    store i8* null, i8** [[TMP160]], align 4
23028 // CHECK12-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
23029 // CHECK12-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
23030 // CHECK12-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 4
23031 // CHECK12-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
23032 // CHECK12-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
23033 // CHECK12-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 4
23034 // CHECK12-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
23035 // CHECK12-NEXT:    store i8* null, i8** [[TMP165]], align 4
23036 // CHECK12-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
23037 // CHECK12-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
23038 // CHECK12-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 4
23039 // CHECK12-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
23040 // CHECK12-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
23041 // CHECK12-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 4
23042 // CHECK12-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
23043 // CHECK12-NEXT:    store i8* null, i8** [[TMP170]], align 4
23044 // CHECK12-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
23045 // CHECK12-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
23046 // CHECK12-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
23047 // CHECK12-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
23048 // CHECK12-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
23049 // CHECK12-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
23050 // CHECK12-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
23051 // CHECK12-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
23052 // CHECK12-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
23053 // CHECK12-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
23054 // CHECK12-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
23055 // CHECK12-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
23056 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
23057 // CHECK12-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
23058 // CHECK12-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
23059 // CHECK12-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
23060 // CHECK12:       omp_offload.failed54:
23061 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i32 [[TMP140]], i32 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
23062 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
23063 // CHECK12:       omp_offload.cont55:
23064 // CHECK12-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
23065 // CHECK12-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
23066 // CHECK12-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
23067 // CHECK12-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 4
23068 // CHECK12-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 4
23069 // CHECK12-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 4
23070 // CHECK12-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
23071 // CHECK12-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
23072 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
23073 // CHECK12-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
23074 // CHECK12-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
23075 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
23076 // CHECK12-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
23077 // CHECK12-NEXT:    store i8* null, i8** [[TMP188]], align 4
23078 // CHECK12-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
23079 // CHECK12-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
23080 // CHECK12-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 4
23081 // CHECK12-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
23082 // CHECK12-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
23083 // CHECK12-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 4
23084 // CHECK12-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
23085 // CHECK12-NEXT:    store i8* null, i8** [[TMP193]], align 4
23086 // CHECK12-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
23087 // CHECK12-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
23088 // CHECK12-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 4
23089 // CHECK12-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
23090 // CHECK12-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
23091 // CHECK12-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 4
23092 // CHECK12-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
23093 // CHECK12-NEXT:    store i8* null, i8** [[TMP198]], align 4
23094 // CHECK12-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
23095 // CHECK12-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
23096 // CHECK12-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 4
23097 // CHECK12-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
23098 // CHECK12-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
23099 // CHECK12-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 4
23100 // CHECK12-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
23101 // CHECK12-NEXT:    store i8* null, i8** [[TMP203]], align 4
23102 // CHECK12-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
23103 // CHECK12-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
23104 // CHECK12-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
23105 // CHECK12-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
23106 // CHECK12-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
23107 // CHECK12-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
23108 // CHECK12-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
23109 // CHECK12-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
23110 // CHECK12-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
23111 // CHECK12-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
23112 // CHECK12-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
23113 // CHECK12-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
23114 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
23115 // CHECK12-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
23116 // CHECK12-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
23117 // CHECK12-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
23118 // CHECK12:       omp_offload.failed67:
23119 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i32 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
23120 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
23121 // CHECK12:       omp_offload.cont68:
23122 // CHECK12-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
23123 // CHECK12-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
23124 // CHECK12-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
23125 // CHECK12-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
23126 // CHECK12-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
23127 // CHECK12-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
23128 // CHECK12-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 4
23129 // CHECK12-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 4
23130 // CHECK12-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 4
23131 // CHECK12-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
23132 // CHECK12-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
23133 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
23134 // CHECK12-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
23135 // CHECK12-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
23136 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
23137 // CHECK12-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
23138 // CHECK12-NEXT:    store i8* null, i8** [[TMP223]], align 4
23139 // CHECK12-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
23140 // CHECK12-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
23141 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
23142 // CHECK12-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
23143 // CHECK12-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
23144 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
23145 // CHECK12-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
23146 // CHECK12-NEXT:    store i8* null, i8** [[TMP228]], align 4
23147 // CHECK12-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
23148 // CHECK12-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
23149 // CHECK12-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 4
23150 // CHECK12-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
23151 // CHECK12-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
23152 // CHECK12-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 4
23153 // CHECK12-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
23154 // CHECK12-NEXT:    store i8* null, i8** [[TMP233]], align 4
23155 // CHECK12-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
23156 // CHECK12-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
23157 // CHECK12-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 4
23158 // CHECK12-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
23159 // CHECK12-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
23160 // CHECK12-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 4
23161 // CHECK12-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
23162 // CHECK12-NEXT:    store i8* null, i8** [[TMP238]], align 4
23163 // CHECK12-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
23164 // CHECK12-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
23165 // CHECK12-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 4
23166 // CHECK12-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
23167 // CHECK12-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
23168 // CHECK12-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 4
23169 // CHECK12-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
23170 // CHECK12-NEXT:    store i8* null, i8** [[TMP243]], align 4
23171 // CHECK12-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
23172 // CHECK12-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
23173 // CHECK12-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
23174 // CHECK12-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
23175 // CHECK12-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
23176 // CHECK12-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
23177 // CHECK12-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
23178 // CHECK12-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
23179 // CHECK12-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
23180 // CHECK12-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
23181 // CHECK12-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
23182 // CHECK12-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
23183 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
23184 // CHECK12-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
23185 // CHECK12-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
23186 // CHECK12-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
23187 // CHECK12:       omp_offload.failed81:
23188 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i32 [[TMP213]], i32 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
23189 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
23190 // CHECK12:       omp_offload.cont82:
23191 // CHECK12-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
23192 // CHECK12-NEXT:    ret i32 [[CALL]]
23193 //
23194 //
23195 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
23196 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
23197 // CHECK12-NEXT:  entry:
23198 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23199 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23200 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23201 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23202 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23203 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23204 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23205 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23206 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23207 // CHECK12-NEXT:    ret void
23208 //
23209 //
23210 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined.
23211 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23212 // CHECK12-NEXT:  entry:
23213 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23214 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23215 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23216 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23217 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23218 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23219 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23220 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23221 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23222 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23223 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23224 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23225 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23226 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23227 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23228 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23229 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23230 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23231 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23232 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23233 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23234 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23235 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23236 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23237 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23238 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23239 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23240 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23241 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23242 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23243 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23244 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23245 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23246 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23247 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23248 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23249 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23250 // CHECK12:       omp.precond.then:
23251 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23252 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23253 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
23254 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23255 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23256 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23257 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
23258 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23259 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23260 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23261 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
23262 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23263 // CHECK12:       cond.true:
23264 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23265 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23266 // CHECK12:       cond.false:
23267 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23268 // CHECK12-NEXT:    br label [[COND_END]]
23269 // CHECK12:       cond.end:
23270 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
23271 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23272 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23273 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
23274 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23275 // CHECK12:       omp.inner.for.cond:
23276 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23277 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
23278 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
23279 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23280 // CHECK12:       omp.inner.for.body:
23281 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
23282 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
23283 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !18
23284 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23285 // CHECK12:       omp.inner.for.inc:
23286 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23287 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
23288 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
23289 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23290 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
23291 // CHECK12:       omp.inner.for.end:
23292 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23293 // CHECK12:       omp.loop.exit:
23294 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23295 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
23296 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
23297 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23298 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
23299 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23300 // CHECK12:       .omp.final.then:
23301 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23302 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
23303 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
23304 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
23305 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
23306 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
23307 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23308 // CHECK12:       .omp.final.done:
23309 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23310 // CHECK12:       omp.precond.end:
23311 // CHECK12-NEXT:    ret void
23312 //
23313 //
23314 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1
23315 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23316 // CHECK12-NEXT:  entry:
23317 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23318 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23319 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23320 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23321 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23322 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23323 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23324 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23325 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23326 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23327 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23328 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23329 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23330 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23331 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23332 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23333 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23334 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23335 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23336 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23337 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23338 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23339 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23340 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23341 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23342 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23343 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23344 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23345 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23346 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23347 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23348 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23349 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23350 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23351 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23352 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23353 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23354 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23355 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23356 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23357 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23358 // CHECK12:       omp.precond.then:
23359 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23360 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23361 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
23362 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23363 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23364 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
23365 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
23366 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23367 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23368 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23369 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23370 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23371 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23372 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23373 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23374 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23375 // CHECK12:       cond.true:
23376 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23377 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23378 // CHECK12:       cond.false:
23379 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23380 // CHECK12-NEXT:    br label [[COND_END]]
23381 // CHECK12:       cond.end:
23382 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23383 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23384 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23385 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23386 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23387 // CHECK12:       omp.inner.for.cond:
23388 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23389 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
23390 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23391 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23392 // CHECK12:       omp.inner.for.body:
23393 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23394 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
23395 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23396 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !22
23397 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !22
23398 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
23399 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
23400 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !22
23401 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !22
23402 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
23403 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
23404 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !22
23405 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
23406 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !22
23407 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
23408 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
23409 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !22
23410 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23411 // CHECK12:       omp.body.continue:
23412 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23413 // CHECK12:       omp.inner.for.inc:
23414 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23415 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
23416 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23417 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
23418 // CHECK12:       omp.inner.for.end:
23419 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23420 // CHECK12:       omp.loop.exit:
23421 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23422 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
23423 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
23424 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23425 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
23426 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23427 // CHECK12:       .omp.final.then:
23428 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23429 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
23430 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23431 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
23432 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
23433 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
23434 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23435 // CHECK12:       .omp.final.done:
23436 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23437 // CHECK12:       omp.precond.end:
23438 // CHECK12-NEXT:    ret void
23439 //
23440 //
23441 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
23442 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
23443 // CHECK12-NEXT:  entry:
23444 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23445 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23446 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23447 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23448 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23449 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23450 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23451 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23452 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23453 // CHECK12-NEXT:    ret void
23454 //
23455 //
23456 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..2
23457 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23458 // CHECK12-NEXT:  entry:
23459 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23460 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23461 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23462 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23463 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23464 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23465 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23466 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23467 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23468 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23469 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23470 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23471 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23472 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23473 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23474 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23475 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23476 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23477 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23478 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23479 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23480 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23481 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23482 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23483 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23484 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23485 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23486 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23487 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23488 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23489 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23490 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23491 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23492 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23493 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23494 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23495 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23496 // CHECK12:       omp.precond.then:
23497 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23498 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23499 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
23500 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23501 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23502 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23503 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
23504 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23505 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23506 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23507 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
23508 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23509 // CHECK12:       cond.true:
23510 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23511 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23512 // CHECK12:       cond.false:
23513 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23514 // CHECK12-NEXT:    br label [[COND_END]]
23515 // CHECK12:       cond.end:
23516 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
23517 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23518 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23519 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
23520 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23521 // CHECK12:       omp.inner.for.cond:
23522 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23523 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
23524 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
23525 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23526 // CHECK12:       omp.inner.for.body:
23527 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !27
23528 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
23529 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !27
23530 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23531 // CHECK12:       omp.inner.for.inc:
23532 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23533 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !27
23534 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
23535 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23536 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
23537 // CHECK12:       omp.inner.for.end:
23538 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23539 // CHECK12:       omp.loop.exit:
23540 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23541 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
23542 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
23543 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23544 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
23545 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23546 // CHECK12:       .omp.final.then:
23547 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23548 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
23549 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
23550 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
23551 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
23552 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
23553 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23554 // CHECK12:       .omp.final.done:
23555 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23556 // CHECK12:       omp.precond.end:
23557 // CHECK12-NEXT:    ret void
23558 //
23559 //
23560 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3
23561 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23562 // CHECK12-NEXT:  entry:
23563 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23564 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23565 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23566 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23567 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23568 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23569 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23570 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23571 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23572 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23573 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23574 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23575 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23576 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23577 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23578 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23579 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23580 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23581 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23582 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23583 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23584 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23585 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23586 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23587 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23588 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23589 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23590 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23591 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23592 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23593 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23594 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23595 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23596 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23597 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23598 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23599 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23600 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23601 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23602 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23603 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23604 // CHECK12:       omp.precond.then:
23605 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23606 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23607 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
23608 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23609 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23610 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
23611 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
23612 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23613 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23614 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23615 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23616 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23617 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23618 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23619 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23620 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23621 // CHECK12:       cond.true:
23622 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23623 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23624 // CHECK12:       cond.false:
23625 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23626 // CHECK12-NEXT:    br label [[COND_END]]
23627 // CHECK12:       cond.end:
23628 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23629 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23630 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23631 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23632 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23633 // CHECK12:       omp.inner.for.cond:
23634 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23635 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
23636 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23637 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23638 // CHECK12:       omp.inner.for.body:
23639 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23640 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
23641 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23642 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !30
23643 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !30
23644 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
23645 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
23646 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !30
23647 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !30
23648 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
23649 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
23650 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !30
23651 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
23652 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !30
23653 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
23654 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
23655 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !30
23656 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23657 // CHECK12:       omp.body.continue:
23658 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23659 // CHECK12:       omp.inner.for.inc:
23660 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23661 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
23662 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23663 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
23664 // CHECK12:       omp.inner.for.end:
23665 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23666 // CHECK12:       omp.loop.exit:
23667 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23668 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
23669 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
23670 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23671 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
23672 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23673 // CHECK12:       .omp.final.then:
23674 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23675 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
23676 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23677 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
23678 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
23679 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
23680 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23681 // CHECK12:       .omp.final.done:
23682 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23683 // CHECK12:       omp.precond.end:
23684 // CHECK12-NEXT:    ret void
23685 //
23686 //
23687 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
23688 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
23689 // CHECK12-NEXT:  entry:
23690 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
23691 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23692 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23693 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23694 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23695 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
23696 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23697 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23698 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23699 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23700 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23701 // CHECK12-NEXT:    ret void
23702 //
23703 //
23704 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6
23705 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23706 // CHECK12-NEXT:  entry:
23707 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23708 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23709 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
23710 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23711 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23712 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23713 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23714 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23715 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23716 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23717 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23718 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23719 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23720 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23721 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23722 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23723 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23724 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23725 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23726 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
23727 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23728 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23729 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23730 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23731 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
23732 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23733 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
23734 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
23735 // CHECK12-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
23736 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
23737 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
23738 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23739 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
23740 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23741 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23742 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23743 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23744 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23745 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
23746 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23747 // CHECK12:       omp.precond.then:
23748 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23749 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23750 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
23751 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23752 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23753 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
23754 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23755 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23756 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
23757 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23758 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23759 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23760 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23761 // CHECK12:       cond.true:
23762 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23763 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23764 // CHECK12:       cond.false:
23765 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23766 // CHECK12-NEXT:    br label [[COND_END]]
23767 // CHECK12:       cond.end:
23768 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23769 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23770 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23771 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23772 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23773 // CHECK12:       omp.inner.for.cond:
23774 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23775 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
23776 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
23777 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
23778 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23779 // CHECK12:       omp.inner.for.body:
23780 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23781 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23782 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !33
23783 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23784 // CHECK12:       omp.inner.for.inc:
23785 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23786 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
23787 // CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
23788 // CHECK12-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23789 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23790 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
23791 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
23792 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23793 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23794 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
23795 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
23796 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23797 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23798 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
23799 // CHECK12-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
23800 // CHECK12-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
23801 // CHECK12:       cond.true10:
23802 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
23803 // CHECK12-NEXT:    br label [[COND_END12:%.*]]
23804 // CHECK12:       cond.false11:
23805 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23806 // CHECK12-NEXT:    br label [[COND_END12]]
23807 // CHECK12:       cond.end12:
23808 // CHECK12-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
23809 // CHECK12-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23810 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23811 // CHECK12-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23812 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
23813 // CHECK12:       omp.inner.for.end:
23814 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23815 // CHECK12:       omp.loop.exit:
23816 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23817 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
23818 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
23819 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23820 // CHECK12-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
23821 // CHECK12-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23822 // CHECK12:       .omp.final.then:
23823 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23824 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
23825 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
23826 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
23827 // CHECK12-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
23828 // CHECK12-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
23829 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23830 // CHECK12:       .omp.final.done:
23831 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23832 // CHECK12:       omp.precond.end:
23833 // CHECK12-NEXT:    ret void
23834 //
23835 //
23836 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7
23837 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23838 // CHECK12-NEXT:  entry:
23839 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23840 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23841 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23842 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23843 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23844 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23845 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23846 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23847 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23848 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23849 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23850 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23851 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23852 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23853 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23854 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23855 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23856 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23857 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23858 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23859 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23860 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23861 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23862 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23863 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23864 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23865 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23866 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23867 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23868 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23869 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23870 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23871 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23872 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23873 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23874 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23875 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23876 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23877 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23878 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23879 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23880 // CHECK12:       omp.precond.then:
23881 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23882 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23883 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
23884 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23885 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23886 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
23887 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
23888 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23889 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23890 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23891 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23892 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23893 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23894 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23895 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23896 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23897 // CHECK12:       cond.true:
23898 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23899 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23900 // CHECK12:       cond.false:
23901 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23902 // CHECK12-NEXT:    br label [[COND_END]]
23903 // CHECK12:       cond.end:
23904 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23905 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23906 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23907 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23908 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23909 // CHECK12:       omp.inner.for.cond:
23910 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23911 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
23912 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23913 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23914 // CHECK12:       omp.inner.for.body:
23915 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23916 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
23917 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23918 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !36
23919 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !36
23920 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
23921 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
23922 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !36
23923 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !36
23924 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
23925 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
23926 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !36
23927 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
23928 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !36
23929 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
23930 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
23931 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !36
23932 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23933 // CHECK12:       omp.body.continue:
23934 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23935 // CHECK12:       omp.inner.for.inc:
23936 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23937 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
23938 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23939 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
23940 // CHECK12:       omp.inner.for.end:
23941 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23942 // CHECK12:       omp.loop.exit:
23943 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23944 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
23945 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
23946 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23947 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
23948 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23949 // CHECK12:       .omp.final.then:
23950 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23951 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
23952 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23953 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
23954 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
23955 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
23956 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23957 // CHECK12:       .omp.final.done:
23958 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23959 // CHECK12:       omp.precond.end:
23960 // CHECK12-NEXT:    ret void
23961 //
23962 //
23963 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
23964 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
23965 // CHECK12-NEXT:  entry:
23966 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23967 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23968 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23969 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23970 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23971 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23972 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23973 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23974 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23975 // CHECK12-NEXT:    ret void
23976 //
23977 //
23978 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10
23979 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23980 // CHECK12-NEXT:  entry:
23981 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23982 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23983 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23984 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23985 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23986 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23987 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23988 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23989 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23990 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23991 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23992 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23993 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23994 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23995 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23996 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23997 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23998 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23999 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24000 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24001 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24002 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24003 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24004 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24005 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24006 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24007 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24008 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24009 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24010 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24011 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24012 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24013 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24014 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24015 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24016 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24017 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24018 // CHECK12:       omp.precond.then:
24019 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24020 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24021 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
24022 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24023 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24024 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24025 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
24026 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24027 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24028 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24029 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
24030 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24031 // CHECK12:       cond.true:
24032 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24033 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24034 // CHECK12:       cond.false:
24035 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24036 // CHECK12-NEXT:    br label [[COND_END]]
24037 // CHECK12:       cond.end:
24038 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
24039 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24040 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24041 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
24042 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24043 // CHECK12:       omp.inner.for.cond:
24044 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
24045 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
24046 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
24047 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24048 // CHECK12:       omp.inner.for.body:
24049 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39
24050 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
24051 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !39
24052 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24053 // CHECK12:       omp.inner.for.inc:
24054 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
24055 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39
24056 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
24057 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
24058 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
24059 // CHECK12:       omp.inner.for.end:
24060 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24061 // CHECK12:       omp.loop.exit:
24062 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24063 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
24064 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
24065 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24066 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
24067 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24068 // CHECK12:       .omp.final.then:
24069 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24070 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
24071 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
24072 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
24073 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
24074 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
24075 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24076 // CHECK12:       .omp.final.done:
24077 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24078 // CHECK12:       omp.precond.end:
24079 // CHECK12-NEXT:    ret void
24080 //
24081 //
24082 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11
24083 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24084 // CHECK12-NEXT:  entry:
24085 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24086 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24087 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24088 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24089 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24090 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24091 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24092 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24093 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24094 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24095 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24096 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24097 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24098 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24099 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24100 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24101 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24102 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
24103 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24104 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24105 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24106 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24107 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24108 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24109 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24110 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24111 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24112 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24113 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24114 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24115 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24116 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24117 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24118 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24119 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24120 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24121 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24122 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24123 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24124 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24125 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24126 // CHECK12:       omp.precond.then:
24127 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24128 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24129 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24130 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24131 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24132 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24133 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24134 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24135 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24136 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24137 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
24138 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24139 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24140 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24141 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
24142 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24143 // CHECK12:       cond.true:
24144 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24145 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24146 // CHECK12:       cond.false:
24147 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24148 // CHECK12-NEXT:    br label [[COND_END]]
24149 // CHECK12:       cond.end:
24150 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
24151 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24152 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24153 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
24154 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24155 // CHECK12:       omp.inner.for.cond:
24156 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24157 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
24158 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
24159 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24160 // CHECK12:       omp.inner.for.body:
24161 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24162 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
24163 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24164 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !42
24165 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !42
24166 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
24167 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
24168 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !42
24169 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !42
24170 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
24171 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
24172 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !42
24173 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
24174 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !42
24175 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
24176 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
24177 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !42
24178 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24179 // CHECK12:       omp.body.continue:
24180 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24181 // CHECK12:       omp.inner.for.inc:
24182 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24183 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
24184 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24185 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
24186 // CHECK12:       omp.inner.for.end:
24187 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24188 // CHECK12:       omp.loop.exit:
24189 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24190 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
24191 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
24192 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24193 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
24194 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24195 // CHECK12:       .omp.final.then:
24196 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24197 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
24198 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
24199 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
24200 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
24201 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
24202 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24203 // CHECK12:       .omp.final.done:
24204 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24205 // CHECK12:       omp.precond.end:
24206 // CHECK12-NEXT:    ret void
24207 //
24208 //
24209 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
24210 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
24211 // CHECK12-NEXT:  entry:
24212 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
24213 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24214 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
24215 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
24216 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
24217 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
24218 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24219 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
24220 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
24221 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
24222 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
24223 // CHECK12-NEXT:    ret void
24224 //
24225 //
24226 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..14
24227 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24228 // CHECK12-NEXT:  entry:
24229 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24230 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24231 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
24232 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24233 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24234 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24235 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24236 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24237 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24238 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24239 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24240 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24241 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24242 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24243 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24244 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24245 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24246 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24247 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
24248 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24249 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24250 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
24251 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24252 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24253 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24254 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24255 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
24256 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24257 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
24258 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
24259 // CHECK12-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
24260 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
24261 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
24262 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
24263 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24264 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24265 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
24266 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24267 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24268 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24269 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24270 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24271 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
24272 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24273 // CHECK12:       omp.precond.then:
24274 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24275 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24276 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
24277 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24278 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24279 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24280 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
24281 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24282 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24283 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24284 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
24285 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24286 // CHECK12:       cond.true:
24287 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24288 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24289 // CHECK12:       cond.false:
24290 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24291 // CHECK12-NEXT:    br label [[COND_END]]
24292 // CHECK12:       cond.end:
24293 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
24294 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24295 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24296 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
24297 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24298 // CHECK12:       omp.inner.for.cond:
24299 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
24300 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
24301 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
24302 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24303 // CHECK12:       omp.inner.for.body:
24304 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !45
24305 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
24306 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
24307 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
24308 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
24309 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !45
24310 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24311 // CHECK12:       omp.inner.for.inc:
24312 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
24313 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !45
24314 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
24315 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
24316 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
24317 // CHECK12:       omp.inner.for.end:
24318 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24319 // CHECK12:       omp.loop.exit:
24320 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24321 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
24322 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
24323 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24324 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
24325 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24326 // CHECK12:       .omp.final.then:
24327 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24328 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
24329 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
24330 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
24331 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
24332 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
24333 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24334 // CHECK12:       .omp.final.done:
24335 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24336 // CHECK12:       omp.precond.end:
24337 // CHECK12-NEXT:    ret void
24338 //
24339 //
24340 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15
24341 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
24342 // CHECK12-NEXT:  entry:
24343 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24344 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24345 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24346 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24347 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24348 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24349 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24350 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24351 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
24352 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24353 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24354 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24355 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24356 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24357 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24358 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24359 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24360 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24361 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24362 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24363 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24364 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24365 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24366 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24367 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24368 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24369 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24370 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24371 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24372 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24373 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24374 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24375 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24376 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24377 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24378 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24379 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24380 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24381 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24382 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24383 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24384 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24385 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24386 // CHECK12:       omp.precond.then:
24387 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24388 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24389 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24390 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24391 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24392 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24393 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24394 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24395 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24396 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24397 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24398 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
24399 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
24400 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24401 // CHECK12:       omp.dispatch.cond:
24402 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24403 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24404 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
24405 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24406 // CHECK12:       cond.true:
24407 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24408 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24409 // CHECK12:       cond.false:
24410 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24411 // CHECK12-NEXT:    br label [[COND_END]]
24412 // CHECK12:       cond.end:
24413 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
24414 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24415 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24416 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
24417 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24418 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24419 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
24420 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24421 // CHECK12:       omp.dispatch.body:
24422 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24423 // CHECK12:       omp.inner.for.cond:
24424 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24425 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
24426 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
24427 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24428 // CHECK12:       omp.inner.for.body:
24429 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24430 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
24431 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24432 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !48
24433 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !48
24434 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
24435 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
24436 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !48
24437 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !48
24438 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
24439 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
24440 // CHECK12-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !48
24441 // CHECK12-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
24442 // CHECK12-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !48
24443 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
24444 // CHECK12-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
24445 // CHECK12-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !48
24446 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24447 // CHECK12:       omp.body.continue:
24448 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24449 // CHECK12:       omp.inner.for.inc:
24450 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24451 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
24452 // CHECK12-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24453 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
24454 // CHECK12:       omp.inner.for.end:
24455 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24456 // CHECK12:       omp.dispatch.inc:
24457 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24458 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24459 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
24460 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
24461 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24462 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24463 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
24464 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
24465 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
24466 // CHECK12:       omp.dispatch.end:
24467 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24468 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
24469 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
24470 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24471 // CHECK12-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
24472 // CHECK12-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24473 // CHECK12:       .omp.final.then:
24474 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24475 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
24476 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
24477 // CHECK12-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
24478 // CHECK12-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
24479 // CHECK12-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
24480 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24481 // CHECK12:       .omp.final.done:
24482 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24483 // CHECK12:       omp.precond.end:
24484 // CHECK12-NEXT:    ret void
24485 //
24486 //
24487 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
24488 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
24489 // CHECK12-NEXT:  entry:
24490 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24491 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
24492 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
24493 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
24494 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24495 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
24496 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
24497 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
24498 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
24499 // CHECK12-NEXT:    ret void
24500 //
24501 //
24502 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..18
24503 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24504 // CHECK12-NEXT:  entry:
24505 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24506 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24507 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24508 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24509 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24510 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24511 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24512 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24513 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24514 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24515 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24516 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24517 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24518 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24519 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24520 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
24521 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24522 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24523 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24524 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24525 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24526 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24527 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24528 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24529 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24530 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24531 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24532 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24533 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24534 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24535 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24536 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24537 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24538 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24539 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24540 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24541 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24542 // CHECK12:       omp.precond.then:
24543 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24544 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24545 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
24546 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24547 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24548 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24549 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
24550 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24551 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24552 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24553 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
24554 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24555 // CHECK12:       cond.true:
24556 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24557 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24558 // CHECK12:       cond.false:
24559 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24560 // CHECK12-NEXT:    br label [[COND_END]]
24561 // CHECK12:       cond.end:
24562 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
24563 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24564 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24565 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
24566 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24567 // CHECK12:       omp.inner.for.cond:
24568 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
24569 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
24570 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
24571 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24572 // CHECK12:       omp.inner.for.body:
24573 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51
24574 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
24575 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !51
24576 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24577 // CHECK12:       omp.inner.for.inc:
24578 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
24579 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51
24580 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
24581 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
24582 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]]
24583 // CHECK12:       omp.inner.for.end:
24584 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24585 // CHECK12:       omp.loop.exit:
24586 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24587 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
24588 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
24589 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24590 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
24591 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24592 // CHECK12:       .omp.final.then:
24593 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24594 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
24595 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
24596 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
24597 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
24598 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
24599 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24600 // CHECK12:       .omp.final.done:
24601 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24602 // CHECK12:       omp.precond.end:
24603 // CHECK12-NEXT:    ret void
24604 //
24605 //
24606 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..19
24607 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24608 // CHECK12-NEXT:  entry:
24609 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24610 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24611 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24612 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24613 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24614 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24615 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24616 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24617 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24618 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24619 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24620 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24621 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24622 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24623 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24624 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24625 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24626 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
24627 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24628 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24629 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24630 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24631 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24632 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24633 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24634 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24635 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24636 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24637 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24638 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24639 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24640 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24641 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24642 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24643 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24644 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24645 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24646 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24647 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24648 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24649 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24650 // CHECK12:       omp.precond.then:
24651 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24652 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24653 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24654 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24655 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24656 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24657 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24658 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24659 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24660 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24661 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24662 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24663 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
24664 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
24665 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24666 // CHECK12:       omp.dispatch.cond:
24667 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24668 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
24669 // CHECK12-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
24670 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
24671 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24672 // CHECK12:       omp.dispatch.body:
24673 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24674 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
24675 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24676 // CHECK12:       omp.inner.for.cond:
24677 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24678 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54
24679 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
24680 // CHECK12-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24681 // CHECK12:       omp.inner.for.body:
24682 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24683 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
24684 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24685 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !54
24686 // CHECK12-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !54
24687 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
24688 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
24689 // CHECK12-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !54
24690 // CHECK12-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !54
24691 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
24692 // CHECK12-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
24693 // CHECK12-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !54
24694 // CHECK12-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
24695 // CHECK12-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !54
24696 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
24697 // CHECK12-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
24698 // CHECK12-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !54
24699 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24700 // CHECK12:       omp.body.continue:
24701 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24702 // CHECK12:       omp.inner.for.inc:
24703 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24704 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
24705 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24706 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]]
24707 // CHECK12:       omp.inner.for.end:
24708 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24709 // CHECK12:       omp.dispatch.inc:
24710 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
24711 // CHECK12:       omp.dispatch.end:
24712 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24713 // CHECK12-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
24714 // CHECK12-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24715 // CHECK12:       .omp.final.then:
24716 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24717 // CHECK12-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
24718 // CHECK12-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
24719 // CHECK12-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
24720 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
24721 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
24722 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24723 // CHECK12:       .omp.final.done:
24724 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24725 // CHECK12:       omp.precond.end:
24726 // CHECK12-NEXT:    ret void
24727 //
24728 //
24729 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
24730 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
24731 // CHECK12-NEXT:  entry:
24732 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
24733 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24734 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
24735 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
24736 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
24737 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
24738 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24739 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
24740 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
24741 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
24742 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
24743 // CHECK12-NEXT:    ret void
24744 //
24745 //
24746 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..22
24747 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24748 // CHECK12-NEXT:  entry:
24749 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24750 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24751 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
24752 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24753 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24754 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24755 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24756 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24757 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24758 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24759 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24760 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24761 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24762 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24763 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24764 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24765 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24766 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24767 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
24768 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24769 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24770 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
24771 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24772 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24773 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24774 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24775 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
24776 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24777 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
24778 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
24779 // CHECK12-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
24780 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
24781 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
24782 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
24783 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24784 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24785 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
24786 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24787 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24788 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24789 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24790 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24791 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
24792 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24793 // CHECK12:       omp.precond.then:
24794 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24795 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24796 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
24797 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24798 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24799 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24800 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
24801 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24802 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24803 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24804 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
24805 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24806 // CHECK12:       cond.true:
24807 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24808 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24809 // CHECK12:       cond.false:
24810 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24811 // CHECK12-NEXT:    br label [[COND_END]]
24812 // CHECK12:       cond.end:
24813 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
24814 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24815 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24816 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
24817 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24818 // CHECK12:       omp.inner.for.cond:
24819 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
24820 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
24821 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
24822 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24823 // CHECK12:       omp.inner.for.body:
24824 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !57
24825 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
24826 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !57
24827 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
24828 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
24829 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !57
24830 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24831 // CHECK12:       omp.inner.for.inc:
24832 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
24833 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !57
24834 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
24835 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
24836 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]]
24837 // CHECK12:       omp.inner.for.end:
24838 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24839 // CHECK12:       omp.loop.exit:
24840 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24841 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
24842 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
24843 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24844 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
24845 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24846 // CHECK12:       .omp.final.then:
24847 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24848 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
24849 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
24850 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
24851 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
24852 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
24853 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24854 // CHECK12:       .omp.final.done:
24855 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24856 // CHECK12:       omp.precond.end:
24857 // CHECK12-NEXT:    ret void
24858 //
24859 //
24860 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..23
24861 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
24862 // CHECK12-NEXT:  entry:
24863 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24864 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24865 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24866 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24867 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24868 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24869 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24870 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24871 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
24872 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24873 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24874 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24875 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24876 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24877 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24878 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24879 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24880 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24881 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24882 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24883 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24884 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24885 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24886 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24887 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24888 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24889 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24890 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24891 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24892 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24893 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24894 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24895 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24896 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24897 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24898 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24899 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24900 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24901 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24902 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24903 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24904 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24905 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24906 // CHECK12:       omp.precond.then:
24907 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24908 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24909 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24910 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24911 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24912 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24913 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24914 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24915 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24916 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24917 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24918 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24919 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24920 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
24921 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
24922 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24923 // CHECK12:       omp.dispatch.cond:
24924 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24925 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
24926 // CHECK12-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
24927 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
24928 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24929 // CHECK12:       omp.dispatch.body:
24930 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24931 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
24932 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24933 // CHECK12:       omp.inner.for.cond:
24934 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24935 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !60
24936 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
24937 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24938 // CHECK12:       omp.inner.for.body:
24939 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24940 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
24941 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24942 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !60
24943 // CHECK12-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !60
24944 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
24945 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
24946 // CHECK12-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !60
24947 // CHECK12-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !60
24948 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
24949 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
24950 // CHECK12-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !60
24951 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
24952 // CHECK12-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !60
24953 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
24954 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
24955 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !60
24956 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24957 // CHECK12:       omp.body.continue:
24958 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24959 // CHECK12:       omp.inner.for.inc:
24960 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24961 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
24962 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24963 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]]
24964 // CHECK12:       omp.inner.for.end:
24965 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24966 // CHECK12:       omp.dispatch.inc:
24967 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
24968 // CHECK12:       omp.dispatch.end:
24969 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24970 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
24971 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24972 // CHECK12:       .omp.final.then:
24973 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24974 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
24975 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
24976 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
24977 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
24978 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
24979 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24980 // CHECK12:       .omp.final.done:
24981 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24982 // CHECK12:       omp.precond.end:
24983 // CHECK12-NEXT:    ret void
24984 //
24985 //
24986 // CHECK12-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
24987 // CHECK12-SAME: () #[[ATTR3:[0-9]+]] comdat {
24988 // CHECK12-NEXT:  entry:
24989 // CHECK12-NEXT:    [[A:%.*]] = alloca i32*, align 4
24990 // CHECK12-NEXT:    [[B:%.*]] = alloca i32*, align 4
24991 // CHECK12-NEXT:    [[C:%.*]] = alloca i32*, align 4
24992 // CHECK12-NEXT:    [[N:%.*]] = alloca i32, align 4
24993 // CHECK12-NEXT:    [[CH:%.*]] = alloca i32, align 4
24994 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
24995 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
24996 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
24997 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
24998 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24999 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25000 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25001 // CHECK12-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
25002 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
25003 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
25004 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
25005 // CHECK12-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
25006 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
25007 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
25008 // CHECK12-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
25009 // CHECK12-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
25010 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
25011 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
25012 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
25013 // CHECK12-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
25014 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
25015 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
25016 // CHECK12-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
25017 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
25018 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
25019 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
25020 // CHECK12-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
25021 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
25022 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
25023 // CHECK12-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
25024 // CHECK12-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
25025 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
25026 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
25027 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
25028 // CHECK12-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
25029 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
25030 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
25031 // CHECK12-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
25032 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
25033 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
25034 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
25035 // CHECK12-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
25036 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
25037 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
25038 // CHECK12-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
25039 // CHECK12-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
25040 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
25041 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
25042 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
25043 // CHECK12-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
25044 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
25045 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
25046 // CHECK12-NEXT:    store i32 10000, i32* [[N]], align 4
25047 // CHECK12-NEXT:    store i32 100, i32* [[CH]], align 4
25048 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
25049 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
25050 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
25051 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 4
25052 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 4
25053 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 4
25054 // CHECK12-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
25055 // CHECK12-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
25056 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
25057 // CHECK12-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
25058 // CHECK12-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
25059 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
25060 // CHECK12-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
25061 // CHECK12-NEXT:    store i8* null, i8** [[TMP9]], align 4
25062 // CHECK12-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
25063 // CHECK12-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
25064 // CHECK12-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 4
25065 // CHECK12-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
25066 // CHECK12-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
25067 // CHECK12-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 4
25068 // CHECK12-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
25069 // CHECK12-NEXT:    store i8* null, i8** [[TMP14]], align 4
25070 // CHECK12-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
25071 // CHECK12-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
25072 // CHECK12-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 4
25073 // CHECK12-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
25074 // CHECK12-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
25075 // CHECK12-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 4
25076 // CHECK12-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
25077 // CHECK12-NEXT:    store i8* null, i8** [[TMP19]], align 4
25078 // CHECK12-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
25079 // CHECK12-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
25080 // CHECK12-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 4
25081 // CHECK12-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
25082 // CHECK12-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
25083 // CHECK12-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 4
25084 // CHECK12-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
25085 // CHECK12-NEXT:    store i8* null, i8** [[TMP24]], align 4
25086 // CHECK12-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
25087 // CHECK12-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
25088 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
25089 // CHECK12-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
25090 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25091 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
25092 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25093 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25094 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25095 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25096 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
25097 // CHECK12-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
25098 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
25099 // CHECK12-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25100 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
25101 // CHECK12-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
25102 // CHECK12:       omp_offload.failed:
25103 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
25104 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT]]
25105 // CHECK12:       omp_offload.cont:
25106 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
25107 // CHECK12-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
25108 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
25109 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 4
25110 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 4
25111 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 4
25112 // CHECK12-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
25113 // CHECK12-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
25114 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
25115 // CHECK12-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
25116 // CHECK12-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
25117 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
25118 // CHECK12-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
25119 // CHECK12-NEXT:    store i8* null, i8** [[TMP42]], align 4
25120 // CHECK12-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
25121 // CHECK12-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
25122 // CHECK12-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 4
25123 // CHECK12-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
25124 // CHECK12-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
25125 // CHECK12-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 4
25126 // CHECK12-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
25127 // CHECK12-NEXT:    store i8* null, i8** [[TMP47]], align 4
25128 // CHECK12-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
25129 // CHECK12-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
25130 // CHECK12-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 4
25131 // CHECK12-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
25132 // CHECK12-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
25133 // CHECK12-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 4
25134 // CHECK12-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
25135 // CHECK12-NEXT:    store i8* null, i8** [[TMP52]], align 4
25136 // CHECK12-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
25137 // CHECK12-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
25138 // CHECK12-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 4
25139 // CHECK12-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
25140 // CHECK12-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
25141 // CHECK12-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 4
25142 // CHECK12-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
25143 // CHECK12-NEXT:    store i8* null, i8** [[TMP57]], align 4
25144 // CHECK12-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
25145 // CHECK12-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
25146 // CHECK12-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
25147 // CHECK12-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
25148 // CHECK12-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
25149 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
25150 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
25151 // CHECK12-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
25152 // CHECK12-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
25153 // CHECK12-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
25154 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
25155 // CHECK12-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
25156 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
25157 // CHECK12-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25158 // CHECK12-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
25159 // CHECK12-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
25160 // CHECK12:       omp_offload.failed14:
25161 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i32 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
25162 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
25163 // CHECK12:       omp_offload.cont15:
25164 // CHECK12-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
25165 // CHECK12-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
25166 // CHECK12-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
25167 // CHECK12-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
25168 // CHECK12-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
25169 // CHECK12-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
25170 // CHECK12-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 4
25171 // CHECK12-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 4
25172 // CHECK12-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 4
25173 // CHECK12-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
25174 // CHECK12-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
25175 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
25176 // CHECK12-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
25177 // CHECK12-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
25178 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
25179 // CHECK12-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
25180 // CHECK12-NEXT:    store i8* null, i8** [[TMP77]], align 4
25181 // CHECK12-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
25182 // CHECK12-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
25183 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
25184 // CHECK12-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
25185 // CHECK12-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
25186 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
25187 // CHECK12-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
25188 // CHECK12-NEXT:    store i8* null, i8** [[TMP82]], align 4
25189 // CHECK12-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
25190 // CHECK12-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
25191 // CHECK12-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 4
25192 // CHECK12-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
25193 // CHECK12-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
25194 // CHECK12-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 4
25195 // CHECK12-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
25196 // CHECK12-NEXT:    store i8* null, i8** [[TMP87]], align 4
25197 // CHECK12-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
25198 // CHECK12-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
25199 // CHECK12-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 4
25200 // CHECK12-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
25201 // CHECK12-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
25202 // CHECK12-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 4
25203 // CHECK12-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
25204 // CHECK12-NEXT:    store i8* null, i8** [[TMP92]], align 4
25205 // CHECK12-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
25206 // CHECK12-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
25207 // CHECK12-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 4
25208 // CHECK12-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
25209 // CHECK12-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
25210 // CHECK12-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 4
25211 // CHECK12-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
25212 // CHECK12-NEXT:    store i8* null, i8** [[TMP97]], align 4
25213 // CHECK12-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
25214 // CHECK12-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
25215 // CHECK12-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
25216 // CHECK12-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
25217 // CHECK12-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
25218 // CHECK12-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
25219 // CHECK12-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
25220 // CHECK12-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
25221 // CHECK12-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
25222 // CHECK12-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
25223 // CHECK12-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
25224 // CHECK12-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
25225 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
25226 // CHECK12-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25227 // CHECK12-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
25228 // CHECK12-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
25229 // CHECK12:       omp_offload.failed27:
25230 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i32 [[TMP67]], i32 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
25231 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
25232 // CHECK12:       omp_offload.cont28:
25233 // CHECK12-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
25234 // CHECK12-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
25235 // CHECK12-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
25236 // CHECK12-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 4
25237 // CHECK12-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 4
25238 // CHECK12-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 4
25239 // CHECK12-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
25240 // CHECK12-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
25241 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
25242 // CHECK12-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
25243 // CHECK12-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
25244 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
25245 // CHECK12-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
25246 // CHECK12-NEXT:    store i8* null, i8** [[TMP115]], align 4
25247 // CHECK12-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
25248 // CHECK12-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
25249 // CHECK12-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 4
25250 // CHECK12-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
25251 // CHECK12-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
25252 // CHECK12-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 4
25253 // CHECK12-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
25254 // CHECK12-NEXT:    store i8* null, i8** [[TMP120]], align 4
25255 // CHECK12-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
25256 // CHECK12-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
25257 // CHECK12-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 4
25258 // CHECK12-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
25259 // CHECK12-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
25260 // CHECK12-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 4
25261 // CHECK12-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
25262 // CHECK12-NEXT:    store i8* null, i8** [[TMP125]], align 4
25263 // CHECK12-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
25264 // CHECK12-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
25265 // CHECK12-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 4
25266 // CHECK12-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
25267 // CHECK12-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
25268 // CHECK12-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 4
25269 // CHECK12-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
25270 // CHECK12-NEXT:    store i8* null, i8** [[TMP130]], align 4
25271 // CHECK12-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
25272 // CHECK12-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
25273 // CHECK12-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
25274 // CHECK12-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
25275 // CHECK12-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
25276 // CHECK12-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
25277 // CHECK12-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
25278 // CHECK12-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
25279 // CHECK12-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
25280 // CHECK12-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
25281 // CHECK12-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
25282 // CHECK12-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
25283 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
25284 // CHECK12-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25285 // CHECK12-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
25286 // CHECK12-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
25287 // CHECK12:       omp_offload.failed40:
25288 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i32 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
25289 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
25290 // CHECK12:       omp_offload.cont41:
25291 // CHECK12-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
25292 // CHECK12-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
25293 // CHECK12-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
25294 // CHECK12-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
25295 // CHECK12-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
25296 // CHECK12-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
25297 // CHECK12-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 4
25298 // CHECK12-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 4
25299 // CHECK12-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 4
25300 // CHECK12-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
25301 // CHECK12-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
25302 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
25303 // CHECK12-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
25304 // CHECK12-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
25305 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
25306 // CHECK12-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
25307 // CHECK12-NEXT:    store i8* null, i8** [[TMP150]], align 4
25308 // CHECK12-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
25309 // CHECK12-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
25310 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
25311 // CHECK12-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
25312 // CHECK12-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
25313 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
25314 // CHECK12-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
25315 // CHECK12-NEXT:    store i8* null, i8** [[TMP155]], align 4
25316 // CHECK12-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
25317 // CHECK12-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
25318 // CHECK12-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 4
25319 // CHECK12-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
25320 // CHECK12-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
25321 // CHECK12-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 4
25322 // CHECK12-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
25323 // CHECK12-NEXT:    store i8* null, i8** [[TMP160]], align 4
25324 // CHECK12-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
25325 // CHECK12-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
25326 // CHECK12-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 4
25327 // CHECK12-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
25328 // CHECK12-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
25329 // CHECK12-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 4
25330 // CHECK12-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
25331 // CHECK12-NEXT:    store i8* null, i8** [[TMP165]], align 4
25332 // CHECK12-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
25333 // CHECK12-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
25334 // CHECK12-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 4
25335 // CHECK12-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
25336 // CHECK12-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
25337 // CHECK12-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 4
25338 // CHECK12-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
25339 // CHECK12-NEXT:    store i8* null, i8** [[TMP170]], align 4
25340 // CHECK12-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
25341 // CHECK12-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
25342 // CHECK12-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
25343 // CHECK12-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
25344 // CHECK12-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
25345 // CHECK12-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
25346 // CHECK12-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
25347 // CHECK12-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
25348 // CHECK12-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
25349 // CHECK12-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
25350 // CHECK12-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
25351 // CHECK12-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
25352 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
25353 // CHECK12-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25354 // CHECK12-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
25355 // CHECK12-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
25356 // CHECK12:       omp_offload.failed54:
25357 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i32 [[TMP140]], i32 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
25358 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
25359 // CHECK12:       omp_offload.cont55:
25360 // CHECK12-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
25361 // CHECK12-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
25362 // CHECK12-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
25363 // CHECK12-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 4
25364 // CHECK12-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 4
25365 // CHECK12-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 4
25366 // CHECK12-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
25367 // CHECK12-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
25368 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
25369 // CHECK12-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
25370 // CHECK12-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
25371 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
25372 // CHECK12-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
25373 // CHECK12-NEXT:    store i8* null, i8** [[TMP188]], align 4
25374 // CHECK12-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
25375 // CHECK12-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
25376 // CHECK12-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 4
25377 // CHECK12-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
25378 // CHECK12-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
25379 // CHECK12-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 4
25380 // CHECK12-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
25381 // CHECK12-NEXT:    store i8* null, i8** [[TMP193]], align 4
25382 // CHECK12-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
25383 // CHECK12-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
25384 // CHECK12-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 4
25385 // CHECK12-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
25386 // CHECK12-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
25387 // CHECK12-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 4
25388 // CHECK12-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
25389 // CHECK12-NEXT:    store i8* null, i8** [[TMP198]], align 4
25390 // CHECK12-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
25391 // CHECK12-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
25392 // CHECK12-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 4
25393 // CHECK12-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
25394 // CHECK12-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
25395 // CHECK12-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 4
25396 // CHECK12-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
25397 // CHECK12-NEXT:    store i8* null, i8** [[TMP203]], align 4
25398 // CHECK12-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
25399 // CHECK12-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
25400 // CHECK12-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
25401 // CHECK12-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
25402 // CHECK12-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
25403 // CHECK12-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
25404 // CHECK12-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
25405 // CHECK12-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
25406 // CHECK12-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
25407 // CHECK12-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
25408 // CHECK12-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
25409 // CHECK12-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
25410 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
25411 // CHECK12-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25412 // CHECK12-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
25413 // CHECK12-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
25414 // CHECK12:       omp_offload.failed67:
25415 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i32 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
25416 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
25417 // CHECK12:       omp_offload.cont68:
25418 // CHECK12-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
25419 // CHECK12-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
25420 // CHECK12-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
25421 // CHECK12-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
25422 // CHECK12-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
25423 // CHECK12-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
25424 // CHECK12-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 4
25425 // CHECK12-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 4
25426 // CHECK12-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 4
25427 // CHECK12-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
25428 // CHECK12-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
25429 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
25430 // CHECK12-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
25431 // CHECK12-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
25432 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
25433 // CHECK12-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
25434 // CHECK12-NEXT:    store i8* null, i8** [[TMP223]], align 4
25435 // CHECK12-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
25436 // CHECK12-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
25437 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
25438 // CHECK12-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
25439 // CHECK12-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
25440 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
25441 // CHECK12-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
25442 // CHECK12-NEXT:    store i8* null, i8** [[TMP228]], align 4
25443 // CHECK12-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
25444 // CHECK12-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
25445 // CHECK12-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 4
25446 // CHECK12-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
25447 // CHECK12-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
25448 // CHECK12-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 4
25449 // CHECK12-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
25450 // CHECK12-NEXT:    store i8* null, i8** [[TMP233]], align 4
25451 // CHECK12-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
25452 // CHECK12-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
25453 // CHECK12-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 4
25454 // CHECK12-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
25455 // CHECK12-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
25456 // CHECK12-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 4
25457 // CHECK12-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
25458 // CHECK12-NEXT:    store i8* null, i8** [[TMP238]], align 4
25459 // CHECK12-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
25460 // CHECK12-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
25461 // CHECK12-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 4
25462 // CHECK12-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
25463 // CHECK12-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
25464 // CHECK12-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 4
25465 // CHECK12-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
25466 // CHECK12-NEXT:    store i8* null, i8** [[TMP243]], align 4
25467 // CHECK12-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
25468 // CHECK12-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
25469 // CHECK12-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
25470 // CHECK12-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
25471 // CHECK12-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
25472 // CHECK12-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
25473 // CHECK12-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
25474 // CHECK12-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
25475 // CHECK12-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
25476 // CHECK12-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
25477 // CHECK12-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
25478 // CHECK12-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
25479 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
25480 // CHECK12-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25481 // CHECK12-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
25482 // CHECK12-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
25483 // CHECK12:       omp_offload.failed81:
25484 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i32 [[TMP213]], i32 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
25485 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
25486 // CHECK12:       omp_offload.cont82:
25487 // CHECK12-NEXT:    ret i32 0
25488 //
25489 //
25490 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
25491 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
25492 // CHECK12-NEXT:  entry:
25493 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25494 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25495 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
25496 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
25497 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25498 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25499 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
25500 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
25501 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
25502 // CHECK12-NEXT:    ret void
25503 //
25504 //
25505 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..26
25506 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25507 // CHECK12-NEXT:  entry:
25508 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25509 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25510 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25511 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25512 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25513 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25514 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25515 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25516 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25517 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25518 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25519 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25520 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25521 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25522 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25523 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25524 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25525 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25526 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25527 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25528 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25529 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25530 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25531 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25532 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25533 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25534 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25535 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25536 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25537 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25538 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25539 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25540 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25541 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25542 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25543 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25544 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25545 // CHECK12:       omp.precond.then:
25546 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25547 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25548 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
25549 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25550 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25551 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25552 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25553 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25554 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25555 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25556 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
25557 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25558 // CHECK12:       cond.true:
25559 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25560 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25561 // CHECK12:       cond.false:
25562 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25563 // CHECK12-NEXT:    br label [[COND_END]]
25564 // CHECK12:       cond.end:
25565 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
25566 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25567 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25568 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
25569 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25570 // CHECK12:       omp.inner.for.cond:
25571 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
25572 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
25573 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25574 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25575 // CHECK12:       omp.inner.for.body:
25576 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !63
25577 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
25578 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !63
25579 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25580 // CHECK12:       omp.inner.for.inc:
25581 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
25582 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !63
25583 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
25584 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
25585 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]]
25586 // CHECK12:       omp.inner.for.end:
25587 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25588 // CHECK12:       omp.loop.exit:
25589 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25590 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
25591 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
25592 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25593 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
25594 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25595 // CHECK12:       .omp.final.then:
25596 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25597 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
25598 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
25599 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
25600 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
25601 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
25602 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25603 // CHECK12:       .omp.final.done:
25604 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25605 // CHECK12:       omp.precond.end:
25606 // CHECK12-NEXT:    ret void
25607 //
25608 //
25609 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..27
25610 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25611 // CHECK12-NEXT:  entry:
25612 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25613 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25614 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
25615 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
25616 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25617 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25618 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25619 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25620 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25621 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25622 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25623 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25624 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25625 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25626 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25627 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25628 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25629 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25630 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25631 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25632 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25633 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25634 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25635 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25636 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25637 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25638 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25639 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25640 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25641 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25642 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25643 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25644 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25645 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25646 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25647 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25648 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25649 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25650 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25651 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25652 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25653 // CHECK12:       omp.precond.then:
25654 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25655 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25656 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
25657 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25658 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25659 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
25660 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
25661 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25662 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25663 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25664 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
25665 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25666 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25667 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25668 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
25669 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25670 // CHECK12:       cond.true:
25671 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25672 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25673 // CHECK12:       cond.false:
25674 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25675 // CHECK12-NEXT:    br label [[COND_END]]
25676 // CHECK12:       cond.end:
25677 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
25678 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25679 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25680 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
25681 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25682 // CHECK12:       omp.inner.for.cond:
25683 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25684 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !66
25685 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
25686 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25687 // CHECK12:       omp.inner.for.body:
25688 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25689 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
25690 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25691 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !66
25692 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !66
25693 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
25694 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
25695 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !66
25696 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !66
25697 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
25698 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
25699 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !66
25700 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
25701 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !66
25702 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
25703 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
25704 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !66
25705 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25706 // CHECK12:       omp.body.continue:
25707 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25708 // CHECK12:       omp.inner.for.inc:
25709 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25710 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
25711 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25712 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP67:![0-9]+]]
25713 // CHECK12:       omp.inner.for.end:
25714 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25715 // CHECK12:       omp.loop.exit:
25716 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25717 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
25718 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
25719 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25720 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
25721 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25722 // CHECK12:       .omp.final.then:
25723 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25724 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
25725 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
25726 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
25727 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
25728 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
25729 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25730 // CHECK12:       .omp.final.done:
25731 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25732 // CHECK12:       omp.precond.end:
25733 // CHECK12-NEXT:    ret void
25734 //
25735 //
25736 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
25737 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
25738 // CHECK12-NEXT:  entry:
25739 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25740 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25741 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
25742 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
25743 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25744 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25745 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
25746 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
25747 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
25748 // CHECK12-NEXT:    ret void
25749 //
25750 //
25751 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..30
25752 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25753 // CHECK12-NEXT:  entry:
25754 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25755 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25756 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25757 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25758 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25759 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25760 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25761 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25762 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25763 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25764 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25765 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25766 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25767 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25768 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25769 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25770 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25771 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25772 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25773 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25774 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25775 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25776 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25777 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25778 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25779 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25780 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25781 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25782 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25783 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25784 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25785 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25786 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25787 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25788 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25789 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25790 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25791 // CHECK12:       omp.precond.then:
25792 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25793 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25794 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
25795 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25796 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25797 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25798 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25799 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25800 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25801 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25802 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
25803 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25804 // CHECK12:       cond.true:
25805 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25806 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25807 // CHECK12:       cond.false:
25808 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25809 // CHECK12-NEXT:    br label [[COND_END]]
25810 // CHECK12:       cond.end:
25811 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
25812 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25813 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25814 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
25815 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25816 // CHECK12:       omp.inner.for.cond:
25817 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
25818 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
25819 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25820 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25821 // CHECK12:       omp.inner.for.body:
25822 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !69
25823 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
25824 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !69
25825 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25826 // CHECK12:       omp.inner.for.inc:
25827 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
25828 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !69
25829 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
25830 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
25831 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP70:![0-9]+]]
25832 // CHECK12:       omp.inner.for.end:
25833 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25834 // CHECK12:       omp.loop.exit:
25835 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25836 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
25837 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
25838 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25839 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
25840 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25841 // CHECK12:       .omp.final.then:
25842 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25843 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
25844 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
25845 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
25846 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
25847 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
25848 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25849 // CHECK12:       .omp.final.done:
25850 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25851 // CHECK12:       omp.precond.end:
25852 // CHECK12-NEXT:    ret void
25853 //
25854 //
25855 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..31
25856 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25857 // CHECK12-NEXT:  entry:
25858 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25859 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25860 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
25861 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
25862 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25863 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25864 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25865 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25866 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25867 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25868 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25869 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25870 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25871 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25872 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25873 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25874 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25875 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25876 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25877 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25878 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25879 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25880 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25881 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25882 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25883 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25884 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25885 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25886 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25887 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25888 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25889 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25890 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25891 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25892 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25893 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25894 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25895 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25896 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25897 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25898 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25899 // CHECK12:       omp.precond.then:
25900 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25901 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25902 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
25903 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25904 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25905 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
25906 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
25907 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25908 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25909 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25910 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
25911 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25912 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25913 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25914 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
25915 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25916 // CHECK12:       cond.true:
25917 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25918 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25919 // CHECK12:       cond.false:
25920 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25921 // CHECK12-NEXT:    br label [[COND_END]]
25922 // CHECK12:       cond.end:
25923 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
25924 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25925 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25926 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
25927 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25928 // CHECK12:       omp.inner.for.cond:
25929 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25930 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !72
25931 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
25932 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25933 // CHECK12:       omp.inner.for.body:
25934 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25935 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
25936 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25937 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !72
25938 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !72
25939 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
25940 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
25941 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !72
25942 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !72
25943 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
25944 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
25945 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !72
25946 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
25947 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !72
25948 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
25949 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
25950 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !72
25951 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25952 // CHECK12:       omp.body.continue:
25953 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25954 // CHECK12:       omp.inner.for.inc:
25955 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25956 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
25957 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25958 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP73:![0-9]+]]
25959 // CHECK12:       omp.inner.for.end:
25960 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25961 // CHECK12:       omp.loop.exit:
25962 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25963 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
25964 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
25965 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25966 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
25967 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25968 // CHECK12:       .omp.final.then:
25969 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25970 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
25971 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
25972 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
25973 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
25974 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
25975 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25976 // CHECK12:       .omp.final.done:
25977 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25978 // CHECK12:       omp.precond.end:
25979 // CHECK12-NEXT:    ret void
25980 //
25981 //
25982 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
25983 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
25984 // CHECK12-NEXT:  entry:
25985 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
25986 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25987 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25988 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
25989 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
25990 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
25991 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25992 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25993 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
25994 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
25995 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
25996 // CHECK12-NEXT:    ret void
25997 //
25998 //
25999 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..34
26000 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26001 // CHECK12-NEXT:  entry:
26002 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26003 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26004 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
26005 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26006 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26007 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26008 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26009 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26010 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26011 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26012 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26013 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26014 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26015 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26016 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26017 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26018 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26019 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26020 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26021 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
26022 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26023 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26024 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26025 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26026 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
26027 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26028 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26029 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26030 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26031 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
26032 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
26033 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26034 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
26035 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26036 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26037 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26038 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26039 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26040 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
26041 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26042 // CHECK12:       omp.precond.then:
26043 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26044 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26045 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
26046 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26047 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26048 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
26049 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26050 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26051 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
26052 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26053 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26054 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26055 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26056 // CHECK12:       cond.true:
26057 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26058 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26059 // CHECK12:       cond.false:
26060 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26061 // CHECK12-NEXT:    br label [[COND_END]]
26062 // CHECK12:       cond.end:
26063 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26064 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26065 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26066 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26067 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26068 // CHECK12:       omp.inner.for.cond:
26069 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26070 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
26071 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
26072 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
26073 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26074 // CHECK12:       omp.inner.for.body:
26075 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26076 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26077 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !75
26078 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26079 // CHECK12:       omp.inner.for.inc:
26080 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26081 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
26082 // CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
26083 // CHECK12-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26084 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26085 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
26086 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
26087 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26088 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26089 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
26090 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
26091 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26092 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26093 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
26094 // CHECK12-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
26095 // CHECK12-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
26096 // CHECK12:       cond.true10:
26097 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
26098 // CHECK12-NEXT:    br label [[COND_END12:%.*]]
26099 // CHECK12:       cond.false11:
26100 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26101 // CHECK12-NEXT:    br label [[COND_END12]]
26102 // CHECK12:       cond.end12:
26103 // CHECK12-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
26104 // CHECK12-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26105 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26106 // CHECK12-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26107 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP76:![0-9]+]]
26108 // CHECK12:       omp.inner.for.end:
26109 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26110 // CHECK12:       omp.loop.exit:
26111 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26112 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
26113 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
26114 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26115 // CHECK12-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
26116 // CHECK12-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26117 // CHECK12:       .omp.final.then:
26118 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26119 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
26120 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
26121 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
26122 // CHECK12-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
26123 // CHECK12-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
26124 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26125 // CHECK12:       .omp.final.done:
26126 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26127 // CHECK12:       omp.precond.end:
26128 // CHECK12-NEXT:    ret void
26129 //
26130 //
26131 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..35
26132 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26133 // CHECK12-NEXT:  entry:
26134 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26135 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26136 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26137 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26138 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26139 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26140 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26141 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26142 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26143 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26144 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26145 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26146 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26147 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26148 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26149 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26150 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26151 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26152 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26153 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26154 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26155 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26156 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26157 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26158 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26159 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26160 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26161 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26162 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26163 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26164 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26165 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26166 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26167 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26168 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26169 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26170 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26171 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26172 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26173 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26174 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26175 // CHECK12:       omp.precond.then:
26176 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26177 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26178 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26179 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26180 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26181 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26182 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26183 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26184 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26185 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26186 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26187 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26188 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26189 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26190 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26191 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26192 // CHECK12:       cond.true:
26193 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26194 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26195 // CHECK12:       cond.false:
26196 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26197 // CHECK12-NEXT:    br label [[COND_END]]
26198 // CHECK12:       cond.end:
26199 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26200 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26201 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26202 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26203 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26204 // CHECK12:       omp.inner.for.cond:
26205 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26206 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !78
26207 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
26208 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26209 // CHECK12:       omp.inner.for.body:
26210 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26211 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
26212 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26213 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !78
26214 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !78
26215 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
26216 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
26217 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !78
26218 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !78
26219 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
26220 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
26221 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !78
26222 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
26223 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !78
26224 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
26225 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
26226 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !78
26227 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26228 // CHECK12:       omp.body.continue:
26229 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26230 // CHECK12:       omp.inner.for.inc:
26231 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26232 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
26233 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26234 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP79:![0-9]+]]
26235 // CHECK12:       omp.inner.for.end:
26236 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26237 // CHECK12:       omp.loop.exit:
26238 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26239 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
26240 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
26241 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26242 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
26243 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26244 // CHECK12:       .omp.final.then:
26245 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26246 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
26247 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
26248 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
26249 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
26250 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
26251 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26252 // CHECK12:       .omp.final.done:
26253 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26254 // CHECK12:       omp.precond.end:
26255 // CHECK12-NEXT:    ret void
26256 //
26257 //
26258 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
26259 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
26260 // CHECK12-NEXT:  entry:
26261 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26262 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26263 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
26264 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
26265 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26266 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26267 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
26268 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
26269 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
26270 // CHECK12-NEXT:    ret void
26271 //
26272 //
26273 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..38
26274 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26275 // CHECK12-NEXT:  entry:
26276 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26277 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26278 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26279 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26280 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26281 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26282 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26283 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26284 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26285 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26286 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26287 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26288 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26289 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26290 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26291 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26292 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26293 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26294 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26295 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26296 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26297 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26298 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26299 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26300 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26301 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26302 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26303 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26304 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26305 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26306 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26307 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26308 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26309 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26310 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26311 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26312 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26313 // CHECK12:       omp.precond.then:
26314 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26315 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26316 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
26317 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26318 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26319 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26320 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
26321 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26322 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26323 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26324 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
26325 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26326 // CHECK12:       cond.true:
26327 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26328 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26329 // CHECK12:       cond.false:
26330 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26331 // CHECK12-NEXT:    br label [[COND_END]]
26332 // CHECK12:       cond.end:
26333 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
26334 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26335 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26336 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
26337 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26338 // CHECK12:       omp.inner.for.cond:
26339 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
26340 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
26341 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
26342 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26343 // CHECK12:       omp.inner.for.body:
26344 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !81
26345 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
26346 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !81
26347 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26348 // CHECK12:       omp.inner.for.inc:
26349 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
26350 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !81
26351 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
26352 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
26353 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP82:![0-9]+]]
26354 // CHECK12:       omp.inner.for.end:
26355 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26356 // CHECK12:       omp.loop.exit:
26357 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26358 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
26359 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
26360 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26361 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
26362 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26363 // CHECK12:       .omp.final.then:
26364 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26365 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
26366 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
26367 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
26368 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
26369 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
26370 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26371 // CHECK12:       .omp.final.done:
26372 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26373 // CHECK12:       omp.precond.end:
26374 // CHECK12-NEXT:    ret void
26375 //
26376 //
26377 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..39
26378 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26379 // CHECK12-NEXT:  entry:
26380 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26381 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26382 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26383 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26384 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26385 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26386 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26387 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26388 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26389 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26390 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26391 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26392 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26393 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26394 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26395 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26396 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26397 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26398 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26399 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26400 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26401 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26402 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26403 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26404 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26405 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26406 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26407 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26408 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26409 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26410 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26411 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26412 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26413 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26414 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26415 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26416 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26417 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26418 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26419 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26420 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26421 // CHECK12:       omp.precond.then:
26422 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26423 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26424 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26425 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26426 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26427 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26428 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26429 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26430 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26431 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26432 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26433 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26434 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26435 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26436 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26437 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26438 // CHECK12:       cond.true:
26439 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26440 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26441 // CHECK12:       cond.false:
26442 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26443 // CHECK12-NEXT:    br label [[COND_END]]
26444 // CHECK12:       cond.end:
26445 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26446 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26447 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26448 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26449 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26450 // CHECK12:       omp.inner.for.cond:
26451 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26452 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !84
26453 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
26454 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26455 // CHECK12:       omp.inner.for.body:
26456 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26457 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
26458 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26459 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !84
26460 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !84
26461 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
26462 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
26463 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !84
26464 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !84
26465 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
26466 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
26467 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !84
26468 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
26469 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !84
26470 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
26471 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
26472 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !84
26473 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26474 // CHECK12:       omp.body.continue:
26475 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26476 // CHECK12:       omp.inner.for.inc:
26477 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26478 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
26479 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26480 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP85:![0-9]+]]
26481 // CHECK12:       omp.inner.for.end:
26482 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26483 // CHECK12:       omp.loop.exit:
26484 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26485 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
26486 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
26487 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26488 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
26489 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26490 // CHECK12:       .omp.final.then:
26491 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26492 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
26493 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
26494 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
26495 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
26496 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
26497 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26498 // CHECK12:       .omp.final.done:
26499 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26500 // CHECK12:       omp.precond.end:
26501 // CHECK12-NEXT:    ret void
26502 //
26503 //
26504 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
26505 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
26506 // CHECK12-NEXT:  entry:
26507 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
26508 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26509 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26510 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
26511 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
26512 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
26513 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26514 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26515 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
26516 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
26517 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
26518 // CHECK12-NEXT:    ret void
26519 //
26520 //
26521 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..42
26522 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26523 // CHECK12-NEXT:  entry:
26524 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26525 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26526 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
26527 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26528 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26529 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26530 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26531 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26532 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26533 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26534 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26535 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26536 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26537 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26538 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26539 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26540 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26541 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
26542 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
26543 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26544 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26545 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
26546 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26547 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26548 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26549 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26550 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
26551 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26552 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26553 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26554 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26555 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
26556 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
26557 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
26558 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26559 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26560 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
26561 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26562 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
26563 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26564 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26565 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26566 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
26567 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26568 // CHECK12:       omp.precond.then:
26569 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26570 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26571 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
26572 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26573 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26574 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26575 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26576 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26577 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26578 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26579 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26580 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26581 // CHECK12:       cond.true:
26582 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26583 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26584 // CHECK12:       cond.false:
26585 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26586 // CHECK12-NEXT:    br label [[COND_END]]
26587 // CHECK12:       cond.end:
26588 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26589 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26590 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26591 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26592 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26593 // CHECK12:       omp.inner.for.cond:
26594 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
26595 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
26596 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
26597 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26598 // CHECK12:       omp.inner.for.body:
26599 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !87
26600 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
26601 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !87
26602 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
26603 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
26604 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !87
26605 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26606 // CHECK12:       omp.inner.for.inc:
26607 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
26608 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !87
26609 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
26610 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
26611 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP88:![0-9]+]]
26612 // CHECK12:       omp.inner.for.end:
26613 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26614 // CHECK12:       omp.loop.exit:
26615 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26616 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
26617 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
26618 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26619 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
26620 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26621 // CHECK12:       .omp.final.then:
26622 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26623 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
26624 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
26625 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
26626 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
26627 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
26628 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26629 // CHECK12:       .omp.final.done:
26630 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26631 // CHECK12:       omp.precond.end:
26632 // CHECK12-NEXT:    ret void
26633 //
26634 //
26635 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..43
26636 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
26637 // CHECK12-NEXT:  entry:
26638 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26639 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26640 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26641 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26642 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26643 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26644 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26645 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26646 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26647 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26648 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26649 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26650 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26651 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26652 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26653 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26654 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26655 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26656 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
26657 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26658 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26659 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26660 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26661 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26662 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26663 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26664 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26665 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26666 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26667 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26668 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26669 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26670 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26671 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26672 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26673 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26674 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26675 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
26676 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26677 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26678 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26679 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26680 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26681 // CHECK12:       omp.precond.then:
26682 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26683 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26684 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26685 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26686 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26687 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26688 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26689 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26690 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26691 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26692 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26693 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
26694 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
26695 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
26696 // CHECK12:       omp.dispatch.cond:
26697 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26698 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26699 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
26700 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26701 // CHECK12:       cond.true:
26702 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26703 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26704 // CHECK12:       cond.false:
26705 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26706 // CHECK12-NEXT:    br label [[COND_END]]
26707 // CHECK12:       cond.end:
26708 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
26709 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26710 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26711 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
26712 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
26713 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26714 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
26715 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
26716 // CHECK12:       omp.dispatch.body:
26717 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26718 // CHECK12:       omp.inner.for.cond:
26719 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26720 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !90
26721 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
26722 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26723 // CHECK12:       omp.inner.for.body:
26724 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26725 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
26726 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26727 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !90
26728 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !90
26729 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
26730 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
26731 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !90
26732 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !90
26733 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
26734 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
26735 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !90
26736 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
26737 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !90
26738 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
26739 // CHECK12-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
26740 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !90
26741 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26742 // CHECK12:       omp.body.continue:
26743 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26744 // CHECK12:       omp.inner.for.inc:
26745 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26746 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
26747 // CHECK12-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26748 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP91:![0-9]+]]
26749 // CHECK12:       omp.inner.for.end:
26750 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
26751 // CHECK12:       omp.dispatch.inc:
26752 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26753 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
26754 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
26755 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
26756 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26757 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
26758 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
26759 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
26760 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
26761 // CHECK12:       omp.dispatch.end:
26762 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26763 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
26764 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
26765 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26766 // CHECK12-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
26767 // CHECK12-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26768 // CHECK12:       .omp.final.then:
26769 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26770 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
26771 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
26772 // CHECK12-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
26773 // CHECK12-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
26774 // CHECK12-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
26775 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26776 // CHECK12:       .omp.final.done:
26777 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26778 // CHECK12:       omp.precond.end:
26779 // CHECK12-NEXT:    ret void
26780 //
26781 //
26782 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
26783 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
26784 // CHECK12-NEXT:  entry:
26785 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26786 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26787 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
26788 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
26789 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26790 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26791 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
26792 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
26793 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
26794 // CHECK12-NEXT:    ret void
26795 //
26796 //
26797 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..46
26798 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26799 // CHECK12-NEXT:  entry:
26800 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26801 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26802 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26803 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26804 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26805 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26806 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26807 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26808 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26809 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26810 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26811 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26812 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26813 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26814 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26815 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26816 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26817 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26818 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26819 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26820 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26821 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26822 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26823 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26824 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26825 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26826 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26827 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26828 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26829 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26830 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26831 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26832 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26833 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26834 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26835 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26836 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26837 // CHECK12:       omp.precond.then:
26838 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26839 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26840 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
26841 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26842 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26843 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26844 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
26845 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26846 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26847 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26848 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
26849 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26850 // CHECK12:       cond.true:
26851 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26852 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26853 // CHECK12:       cond.false:
26854 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26855 // CHECK12-NEXT:    br label [[COND_END]]
26856 // CHECK12:       cond.end:
26857 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
26858 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26859 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26860 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
26861 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26862 // CHECK12:       omp.inner.for.cond:
26863 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
26864 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
26865 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
26866 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26867 // CHECK12:       omp.inner.for.body:
26868 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !93
26869 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
26870 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !93
26871 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26872 // CHECK12:       omp.inner.for.inc:
26873 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
26874 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !93
26875 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
26876 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
26877 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP94:![0-9]+]]
26878 // CHECK12:       omp.inner.for.end:
26879 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26880 // CHECK12:       omp.loop.exit:
26881 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26882 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
26883 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
26884 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26885 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
26886 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26887 // CHECK12:       .omp.final.then:
26888 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26889 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
26890 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
26891 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
26892 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
26893 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
26894 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26895 // CHECK12:       .omp.final.done:
26896 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26897 // CHECK12:       omp.precond.end:
26898 // CHECK12-NEXT:    ret void
26899 //
26900 //
26901 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..47
26902 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26903 // CHECK12-NEXT:  entry:
26904 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26905 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26906 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26907 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26908 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26909 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26910 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26911 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26912 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26913 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26914 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26915 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26916 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26917 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26918 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26919 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26920 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26921 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26922 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26923 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26924 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26925 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26926 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26927 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26928 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26929 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26930 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26931 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26932 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26933 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26934 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26935 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26936 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26937 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26938 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26939 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26940 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26941 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26942 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26943 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26944 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26945 // CHECK12:       omp.precond.then:
26946 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26947 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26948 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26949 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26950 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26951 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26952 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26953 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26954 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26955 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26956 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26957 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26958 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
26959 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
26960 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
26961 // CHECK12:       omp.dispatch.cond:
26962 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26963 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
26964 // CHECK12-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
26965 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
26966 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
26967 // CHECK12:       omp.dispatch.body:
26968 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26969 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
26970 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26971 // CHECK12:       omp.inner.for.cond:
26972 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
26973 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !96
26974 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
26975 // CHECK12-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26976 // CHECK12:       omp.inner.for.body:
26977 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
26978 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
26979 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26980 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !96
26981 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !96
26982 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
26983 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 [[TMP22]]
26984 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !96
26985 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !96
26986 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
26987 // CHECK12-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i32 [[TMP25]]
26988 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !96
26989 // CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
26990 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !96
26991 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
26992 // CHECK12-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i32 [[TMP28]]
26993 // CHECK12-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !96
26994 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26995 // CHECK12:       omp.body.continue:
26996 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26997 // CHECK12:       omp.inner.for.inc:
26998 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
26999 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
27000 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
27001 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP97:![0-9]+]]
27002 // CHECK12:       omp.inner.for.end:
27003 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
27004 // CHECK12:       omp.dispatch.inc:
27005 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
27006 // CHECK12:       omp.dispatch.end:
27007 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27008 // CHECK12-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
27009 // CHECK12-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27010 // CHECK12:       .omp.final.then:
27011 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27012 // CHECK12-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
27013 // CHECK12-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
27014 // CHECK12-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
27015 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
27016 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
27017 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27018 // CHECK12:       .omp.final.done:
27019 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
27020 // CHECK12:       omp.precond.end:
27021 // CHECK12-NEXT:    ret void
27022 //
27023 //
27024 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
27025 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
27026 // CHECK12-NEXT:  entry:
27027 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
27028 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27029 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
27030 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
27031 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
27032 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
27033 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27034 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
27035 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
27036 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
27037 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
27038 // CHECK12-NEXT:    ret void
27039 //
27040 //
27041 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..50
27042 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
27043 // CHECK12-NEXT:  entry:
27044 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27045 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27046 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
27047 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
27048 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
27049 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
27050 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
27051 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27052 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27053 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27054 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27055 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27056 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
27057 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
27058 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
27059 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27060 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27061 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
27062 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
27063 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27064 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27065 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
27066 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
27067 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
27068 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
27069 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
27070 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
27071 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
27072 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
27073 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
27074 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
27075 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
27076 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
27077 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
27078 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27079 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27080 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
27081 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27082 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
27083 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27084 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
27085 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27086 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
27087 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
27088 // CHECK12:       omp.precond.then:
27089 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
27090 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27091 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
27092 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27093 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27094 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27095 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
27096 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
27097 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27098 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27099 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
27100 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
27101 // CHECK12:       cond.true:
27102 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27103 // CHECK12-NEXT:    br label [[COND_END:%.*]]
27104 // CHECK12:       cond.false:
27105 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27106 // CHECK12-NEXT:    br label [[COND_END]]
27107 // CHECK12:       cond.end:
27108 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
27109 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
27110 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
27111 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
27112 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27113 // CHECK12:       omp.inner.for.cond:
27114 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
27115 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
27116 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
27117 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27118 // CHECK12:       omp.inner.for.body:
27119 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !99
27120 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
27121 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !99
27122 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
27123 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
27124 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !99
27125 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27126 // CHECK12:       omp.inner.for.inc:
27127 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
27128 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !99
27129 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
27130 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
27131 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP100:![0-9]+]]
27132 // CHECK12:       omp.inner.for.end:
27133 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
27134 // CHECK12:       omp.loop.exit:
27135 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27136 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
27137 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
27138 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27139 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
27140 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27141 // CHECK12:       .omp.final.then:
27142 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27143 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
27144 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
27145 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
27146 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
27147 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
27148 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27149 // CHECK12:       .omp.final.done:
27150 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
27151 // CHECK12:       omp.precond.end:
27152 // CHECK12-NEXT:    ret void
27153 //
27154 //
27155 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..51
27156 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
27157 // CHECK12-NEXT:  entry:
27158 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27159 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27160 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
27161 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
27162 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
27163 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
27164 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
27165 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
27166 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
27167 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27168 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27169 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27170 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27171 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
27172 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27173 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27174 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27175 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27176 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
27177 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27178 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27179 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27180 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27181 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
27182 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
27183 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
27184 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
27185 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27186 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
27187 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
27188 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
27189 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
27190 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
27191 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27192 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27193 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
27194 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27195 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
27196 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27197 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
27198 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27199 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
27200 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
27201 // CHECK12:       omp.precond.then:
27202 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27203 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27204 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
27205 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27206 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27207 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
27208 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
27209 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27210 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27211 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27212 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27213 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27214 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27215 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
27216 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
27217 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
27218 // CHECK12:       omp.dispatch.cond:
27219 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27220 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
27221 // CHECK12-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
27222 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
27223 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
27224 // CHECK12:       omp.dispatch.body:
27225 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27226 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
27227 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27228 // CHECK12:       omp.inner.for.cond:
27229 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27230 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !102
27231 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
27232 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27233 // CHECK12:       omp.inner.for.body:
27234 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27235 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
27236 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27237 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !102
27238 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !102
27239 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
27240 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i32 [[TMP23]]
27241 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !102
27242 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !102
27243 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
27244 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i32 [[TMP26]]
27245 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !102
27246 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
27247 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !102
27248 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
27249 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i32 [[TMP29]]
27250 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !102
27251 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27252 // CHECK12:       omp.body.continue:
27253 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27254 // CHECK12:       omp.inner.for.inc:
27255 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27256 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
27257 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27258 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP103:![0-9]+]]
27259 // CHECK12:       omp.inner.for.end:
27260 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
27261 // CHECK12:       omp.dispatch.inc:
27262 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
27263 // CHECK12:       omp.dispatch.end:
27264 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27265 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
27266 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27267 // CHECK12:       .omp.final.then:
27268 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27269 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
27270 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
27271 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
27272 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
27273 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
27274 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27275 // CHECK12:       .omp.final.done:
27276 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
27277 // CHECK12:       omp.precond.end:
27278 // CHECK12-NEXT:    ret void
27279 //
27280 //
27281 // CHECK12-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
27282 // CHECK12-SAME: () #[[ATTR4:[0-9]+]] {
27283 // CHECK12-NEXT:  entry:
27284 // CHECK12-NEXT:    call void @__tgt_register_requires(i64 1)
27285 // CHECK12-NEXT:    ret void
27286 //
27287 //
27288 // CHECK13-LABEL: define {{[^@]+}}@main
27289 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
27290 // CHECK13-NEXT:  entry:
27291 // CHECK13-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
27292 // CHECK13-NEXT:    [[A:%.*]] = alloca double*, align 8
27293 // CHECK13-NEXT:    [[B:%.*]] = alloca double*, align 8
27294 // CHECK13-NEXT:    [[C:%.*]] = alloca double*, align 8
27295 // CHECK13-NEXT:    [[N:%.*]] = alloca i32, align 4
27296 // CHECK13-NEXT:    [[CH:%.*]] = alloca i32, align 4
27297 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27298 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27299 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27300 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27301 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27302 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
27303 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27304 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
27305 // CHECK13-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
27306 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
27307 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
27308 // CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
27309 // CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
27310 // CHECK13-NEXT:    [[I23:%.*]] = alloca i32, align 4
27311 // CHECK13-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
27312 // CHECK13-NEXT:    [[I27:%.*]] = alloca i32, align 4
27313 // CHECK13-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
27314 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
27315 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
27316 // CHECK13-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
27317 // CHECK13-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
27318 // CHECK13-NEXT:    [[I57:%.*]] = alloca i32, align 4
27319 // CHECK13-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
27320 // CHECK13-NEXT:    [[I61:%.*]] = alloca i32, align 4
27321 // CHECK13-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
27322 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
27323 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
27324 // CHECK13-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
27325 // CHECK13-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
27326 // CHECK13-NEXT:    [[I91:%.*]] = alloca i32, align 4
27327 // CHECK13-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
27328 // CHECK13-NEXT:    [[I95:%.*]] = alloca i32, align 4
27329 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
27330 // CHECK13-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
27331 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
27332 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
27333 // CHECK13-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
27334 // CHECK13-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
27335 // CHECK13-NEXT:    [[I126:%.*]] = alloca i32, align 4
27336 // CHECK13-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
27337 // CHECK13-NEXT:    [[I130:%.*]] = alloca i32, align 4
27338 // CHECK13-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
27339 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
27340 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
27341 // CHECK13-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
27342 // CHECK13-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
27343 // CHECK13-NEXT:    [[I160:%.*]] = alloca i32, align 4
27344 // CHECK13-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
27345 // CHECK13-NEXT:    [[I164:%.*]] = alloca i32, align 4
27346 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
27347 // CHECK13-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
27348 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
27349 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
27350 // CHECK13-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
27351 // CHECK13-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
27352 // CHECK13-NEXT:    [[I195:%.*]] = alloca i32, align 4
27353 // CHECK13-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
27354 // CHECK13-NEXT:    [[I199:%.*]] = alloca i32, align 4
27355 // CHECK13-NEXT:    store i32 0, i32* [[RETVAL]], align 4
27356 // CHECK13-NEXT:    store i32 10000, i32* [[N]], align 4
27357 // CHECK13-NEXT:    store i32 100, i32* [[CH]], align 4
27358 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
27359 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
27360 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27361 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
27362 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27363 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
27364 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27365 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27366 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27367 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
27368 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
27369 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27370 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
27371 // CHECK13-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
27372 // CHECK13:       simd.if.then:
27373 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27374 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
27375 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27376 // CHECK13:       omp.inner.for.cond:
27377 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27378 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
27379 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
27380 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27381 // CHECK13:       omp.inner.for.body:
27382 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27383 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
27384 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27385 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
27386 // CHECK13-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !2
27387 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
27388 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
27389 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i64 [[IDXPROM]]
27390 // CHECK13-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !2
27391 // CHECK13-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !2
27392 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
27393 // CHECK13-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
27394 // CHECK13-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP11]], i64 [[IDXPROM5]]
27395 // CHECK13-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX6]], align 8, !llvm.access.group !2
27396 // CHECK13-NEXT:    [[ADD7:%.*]] = fadd double [[TMP10]], [[TMP13]]
27397 // CHECK13-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !2
27398 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
27399 // CHECK13-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
27400 // CHECK13-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP14]], i64 [[IDXPROM8]]
27401 // CHECK13-NEXT:    store double [[ADD7]], double* [[ARRAYIDX9]], align 8, !llvm.access.group !2
27402 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27403 // CHECK13:       omp.body.continue:
27404 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27405 // CHECK13:       omp.inner.for.inc:
27406 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27407 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
27408 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27409 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
27410 // CHECK13:       omp.inner.for.end:
27411 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27412 // CHECK13-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
27413 // CHECK13-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
27414 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
27415 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
27416 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
27417 // CHECK13-NEXT:    br label [[SIMD_IF_END]]
27418 // CHECK13:       simd.if.end:
27419 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
27420 // CHECK13-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
27421 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27422 // CHECK13-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
27423 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
27424 // CHECK13-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
27425 // CHECK13-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
27426 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
27427 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
27428 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
27429 // CHECK13-NEXT:    store i32 0, i32* [[I23]], align 4
27430 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27431 // CHECK13-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
27432 // CHECK13-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
27433 // CHECK13:       simd.if.then25:
27434 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
27435 // CHECK13-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
27436 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
27437 // CHECK13:       omp.inner.for.cond28:
27438 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27439 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !6
27440 // CHECK13-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
27441 // CHECK13-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
27442 // CHECK13:       omp.inner.for.body30:
27443 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27444 // CHECK13-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
27445 // CHECK13-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
27446 // CHECK13-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !6
27447 // CHECK13-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !6
27448 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
27449 // CHECK13-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
27450 // CHECK13-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM33]]
27451 // CHECK13-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX34]], align 8, !llvm.access.group !6
27452 // CHECK13-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !6
27453 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
27454 // CHECK13-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
27455 // CHECK13-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM35]]
27456 // CHECK13-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX36]], align 8, !llvm.access.group !6
27457 // CHECK13-NEXT:    [[ADD37:%.*]] = fadd double [[TMP28]], [[TMP31]]
27458 // CHECK13-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !6
27459 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
27460 // CHECK13-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
27461 // CHECK13-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds double, double* [[TMP32]], i64 [[IDXPROM38]]
27462 // CHECK13-NEXT:    store double [[ADD37]], double* [[ARRAYIDX39]], align 8, !llvm.access.group !6
27463 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
27464 // CHECK13:       omp.body.continue40:
27465 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
27466 // CHECK13:       omp.inner.for.inc41:
27467 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27468 // CHECK13-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
27469 // CHECK13-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27470 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP7:![0-9]+]]
27471 // CHECK13:       omp.inner.for.end43:
27472 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27473 // CHECK13-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
27474 // CHECK13-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
27475 // CHECK13-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
27476 // CHECK13-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
27477 // CHECK13-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
27478 // CHECK13-NEXT:    br label [[SIMD_IF_END48]]
27479 // CHECK13:       simd.if.end48:
27480 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
27481 // CHECK13-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
27482 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27483 // CHECK13-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
27484 // CHECK13-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
27485 // CHECK13-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
27486 // CHECK13-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
27487 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
27488 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
27489 // CHECK13-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
27490 // CHECK13-NEXT:    store i32 0, i32* [[I57]], align 4
27491 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27492 // CHECK13-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
27493 // CHECK13-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
27494 // CHECK13:       simd.if.then59:
27495 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
27496 // CHECK13-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
27497 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
27498 // CHECK13:       omp.inner.for.cond62:
27499 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27500 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !9
27501 // CHECK13-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
27502 // CHECK13-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
27503 // CHECK13:       omp.inner.for.body64:
27504 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27505 // CHECK13-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
27506 // CHECK13-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
27507 // CHECK13-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !9
27508 // CHECK13-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !9
27509 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
27510 // CHECK13-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
27511 // CHECK13-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds double, double* [[TMP44]], i64 [[IDXPROM67]]
27512 // CHECK13-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX68]], align 8, !llvm.access.group !9
27513 // CHECK13-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !9
27514 // CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
27515 // CHECK13-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
27516 // CHECK13-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds double, double* [[TMP47]], i64 [[IDXPROM69]]
27517 // CHECK13-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX70]], align 8, !llvm.access.group !9
27518 // CHECK13-NEXT:    [[ADD71:%.*]] = fadd double [[TMP46]], [[TMP49]]
27519 // CHECK13-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !9
27520 // CHECK13-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
27521 // CHECK13-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
27522 // CHECK13-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds double, double* [[TMP50]], i64 [[IDXPROM72]]
27523 // CHECK13-NEXT:    store double [[ADD71]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !9
27524 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
27525 // CHECK13:       omp.body.continue74:
27526 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
27527 // CHECK13:       omp.inner.for.inc75:
27528 // CHECK13-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27529 // CHECK13-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
27530 // CHECK13-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27531 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP10:![0-9]+]]
27532 // CHECK13:       omp.inner.for.end77:
27533 // CHECK13-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27534 // CHECK13-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
27535 // CHECK13-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
27536 // CHECK13-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
27537 // CHECK13-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
27538 // CHECK13-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
27539 // CHECK13-NEXT:    br label [[SIMD_IF_END82]]
27540 // CHECK13:       simd.if.end82:
27541 // CHECK13-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
27542 // CHECK13-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
27543 // CHECK13-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
27544 // CHECK13-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
27545 // CHECK13-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
27546 // CHECK13-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
27547 // CHECK13-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
27548 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
27549 // CHECK13-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
27550 // CHECK13-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
27551 // CHECK13-NEXT:    store i32 0, i32* [[I91]], align 4
27552 // CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
27553 // CHECK13-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
27554 // CHECK13-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
27555 // CHECK13:       simd.if.then93:
27556 // CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
27557 // CHECK13-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
27558 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
27559 // CHECK13:       omp.inner.for.cond96:
27560 // CHECK13-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27561 // CHECK13-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !12
27562 // CHECK13-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
27563 // CHECK13-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
27564 // CHECK13:       omp.inner.for.body98:
27565 // CHECK13-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27566 // CHECK13-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
27567 // CHECK13-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
27568 // CHECK13-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !12
27569 // CHECK13-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !12
27570 // CHECK13-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
27571 // CHECK13-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
27572 // CHECK13-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds double, double* [[TMP62]], i64 [[IDXPROM101]]
27573 // CHECK13-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX102]], align 8, !llvm.access.group !12
27574 // CHECK13-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !12
27575 // CHECK13-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
27576 // CHECK13-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
27577 // CHECK13-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds double, double* [[TMP65]], i64 [[IDXPROM103]]
27578 // CHECK13-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX104]], align 8, !llvm.access.group !12
27579 // CHECK13-NEXT:    [[ADD105:%.*]] = fadd double [[TMP64]], [[TMP67]]
27580 // CHECK13-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !12
27581 // CHECK13-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
27582 // CHECK13-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
27583 // CHECK13-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds double, double* [[TMP68]], i64 [[IDXPROM106]]
27584 // CHECK13-NEXT:    store double [[ADD105]], double* [[ARRAYIDX107]], align 8, !llvm.access.group !12
27585 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
27586 // CHECK13:       omp.body.continue108:
27587 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
27588 // CHECK13:       omp.inner.for.inc109:
27589 // CHECK13-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27590 // CHECK13-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
27591 // CHECK13-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27592 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP13:![0-9]+]]
27593 // CHECK13:       omp.inner.for.end111:
27594 // CHECK13-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
27595 // CHECK13-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
27596 // CHECK13-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
27597 // CHECK13-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
27598 // CHECK13-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
27599 // CHECK13-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
27600 // CHECK13-NEXT:    br label [[SIMD_IF_END116]]
27601 // CHECK13:       simd.if.end116:
27602 // CHECK13-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
27603 // CHECK13-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
27604 // CHECK13-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
27605 // CHECK13-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
27606 // CHECK13-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
27607 // CHECK13-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
27608 // CHECK13-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
27609 // CHECK13-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
27610 // CHECK13-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
27611 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
27612 // CHECK13-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
27613 // CHECK13-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
27614 // CHECK13-NEXT:    store i32 0, i32* [[I126]], align 4
27615 // CHECK13-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
27616 // CHECK13-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
27617 // CHECK13-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
27618 // CHECK13:       simd.if.then128:
27619 // CHECK13-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
27620 // CHECK13-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
27621 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
27622 // CHECK13:       omp.inner.for.cond131:
27623 // CHECK13-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27624 // CHECK13-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !15
27625 // CHECK13-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
27626 // CHECK13-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
27627 // CHECK13:       omp.inner.for.body133:
27628 // CHECK13-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27629 // CHECK13-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
27630 // CHECK13-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
27631 // CHECK13-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !15
27632 // CHECK13-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !15
27633 // CHECK13-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
27634 // CHECK13-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
27635 // CHECK13-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds double, double* [[TMP81]], i64 [[IDXPROM136]]
27636 // CHECK13-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX137]], align 8, !llvm.access.group !15
27637 // CHECK13-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !15
27638 // CHECK13-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
27639 // CHECK13-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
27640 // CHECK13-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds double, double* [[TMP84]], i64 [[IDXPROM138]]
27641 // CHECK13-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX139]], align 8, !llvm.access.group !15
27642 // CHECK13-NEXT:    [[ADD140:%.*]] = fadd double [[TMP83]], [[TMP86]]
27643 // CHECK13-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !15
27644 // CHECK13-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
27645 // CHECK13-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
27646 // CHECK13-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds double, double* [[TMP87]], i64 [[IDXPROM141]]
27647 // CHECK13-NEXT:    store double [[ADD140]], double* [[ARRAYIDX142]], align 8, !llvm.access.group !15
27648 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
27649 // CHECK13:       omp.body.continue143:
27650 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
27651 // CHECK13:       omp.inner.for.inc144:
27652 // CHECK13-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27653 // CHECK13-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
27654 // CHECK13-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27655 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP16:![0-9]+]]
27656 // CHECK13:       omp.inner.for.end146:
27657 // CHECK13-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
27658 // CHECK13-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
27659 // CHECK13-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
27660 // CHECK13-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
27661 // CHECK13-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
27662 // CHECK13-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
27663 // CHECK13-NEXT:    br label [[SIMD_IF_END151]]
27664 // CHECK13:       simd.if.end151:
27665 // CHECK13-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
27666 // CHECK13-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
27667 // CHECK13-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
27668 // CHECK13-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
27669 // CHECK13-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
27670 // CHECK13-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
27671 // CHECK13-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
27672 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
27673 // CHECK13-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
27674 // CHECK13-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
27675 // CHECK13-NEXT:    store i32 0, i32* [[I160]], align 4
27676 // CHECK13-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
27677 // CHECK13-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
27678 // CHECK13-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
27679 // CHECK13:       simd.if.then162:
27680 // CHECK13-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
27681 // CHECK13-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
27682 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
27683 // CHECK13:       omp.inner.for.cond165:
27684 // CHECK13-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27685 // CHECK13-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !18
27686 // CHECK13-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
27687 // CHECK13-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
27688 // CHECK13:       omp.inner.for.body167:
27689 // CHECK13-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27690 // CHECK13-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
27691 // CHECK13-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
27692 // CHECK13-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !18
27693 // CHECK13-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !18
27694 // CHECK13-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
27695 // CHECK13-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
27696 // CHECK13-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds double, double* [[TMP99]], i64 [[IDXPROM170]]
27697 // CHECK13-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX171]], align 8, !llvm.access.group !18
27698 // CHECK13-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !18
27699 // CHECK13-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
27700 // CHECK13-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
27701 // CHECK13-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds double, double* [[TMP102]], i64 [[IDXPROM172]]
27702 // CHECK13-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX173]], align 8, !llvm.access.group !18
27703 // CHECK13-NEXT:    [[ADD174:%.*]] = fadd double [[TMP101]], [[TMP104]]
27704 // CHECK13-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !18
27705 // CHECK13-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
27706 // CHECK13-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
27707 // CHECK13-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds double, double* [[TMP105]], i64 [[IDXPROM175]]
27708 // CHECK13-NEXT:    store double [[ADD174]], double* [[ARRAYIDX176]], align 8, !llvm.access.group !18
27709 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
27710 // CHECK13:       omp.body.continue177:
27711 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
27712 // CHECK13:       omp.inner.for.inc178:
27713 // CHECK13-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27714 // CHECK13-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
27715 // CHECK13-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27716 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP19:![0-9]+]]
27717 // CHECK13:       omp.inner.for.end180:
27718 // CHECK13-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
27719 // CHECK13-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
27720 // CHECK13-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
27721 // CHECK13-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
27722 // CHECK13-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
27723 // CHECK13-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
27724 // CHECK13-NEXT:    br label [[SIMD_IF_END185]]
27725 // CHECK13:       simd.if.end185:
27726 // CHECK13-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
27727 // CHECK13-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
27728 // CHECK13-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
27729 // CHECK13-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
27730 // CHECK13-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
27731 // CHECK13-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
27732 // CHECK13-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
27733 // CHECK13-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
27734 // CHECK13-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
27735 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
27736 // CHECK13-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
27737 // CHECK13-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
27738 // CHECK13-NEXT:    store i32 0, i32* [[I195]], align 4
27739 // CHECK13-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
27740 // CHECK13-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
27741 // CHECK13-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
27742 // CHECK13:       simd.if.then197:
27743 // CHECK13-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
27744 // CHECK13-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
27745 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
27746 // CHECK13:       omp.inner.for.cond200:
27747 // CHECK13-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27748 // CHECK13-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !21
27749 // CHECK13-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
27750 // CHECK13-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
27751 // CHECK13:       omp.inner.for.body202:
27752 // CHECK13-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27753 // CHECK13-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
27754 // CHECK13-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
27755 // CHECK13-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !21
27756 // CHECK13-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !21
27757 // CHECK13-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
27758 // CHECK13-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
27759 // CHECK13-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds double, double* [[TMP118]], i64 [[IDXPROM205]]
27760 // CHECK13-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX206]], align 8, !llvm.access.group !21
27761 // CHECK13-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !21
27762 // CHECK13-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
27763 // CHECK13-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
27764 // CHECK13-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds double, double* [[TMP121]], i64 [[IDXPROM207]]
27765 // CHECK13-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX208]], align 8, !llvm.access.group !21
27766 // CHECK13-NEXT:    [[ADD209:%.*]] = fadd double [[TMP120]], [[TMP123]]
27767 // CHECK13-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !21
27768 // CHECK13-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
27769 // CHECK13-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
27770 // CHECK13-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds double, double* [[TMP124]], i64 [[IDXPROM210]]
27771 // CHECK13-NEXT:    store double [[ADD209]], double* [[ARRAYIDX211]], align 8, !llvm.access.group !21
27772 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
27773 // CHECK13:       omp.body.continue212:
27774 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
27775 // CHECK13:       omp.inner.for.inc213:
27776 // CHECK13-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27777 // CHECK13-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
27778 // CHECK13-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27779 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP22:![0-9]+]]
27780 // CHECK13:       omp.inner.for.end215:
27781 // CHECK13-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
27782 // CHECK13-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
27783 // CHECK13-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
27784 // CHECK13-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
27785 // CHECK13-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
27786 // CHECK13-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
27787 // CHECK13-NEXT:    br label [[SIMD_IF_END220]]
27788 // CHECK13:       simd.if.end220:
27789 // CHECK13-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
27790 // CHECK13-NEXT:    ret i32 [[CALL]]
27791 //
27792 //
27793 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
27794 // CHECK13-SAME: () #[[ATTR1:[0-9]+]] comdat {
27795 // CHECK13-NEXT:  entry:
27796 // CHECK13-NEXT:    [[A:%.*]] = alloca i32*, align 8
27797 // CHECK13-NEXT:    [[B:%.*]] = alloca i32*, align 8
27798 // CHECK13-NEXT:    [[C:%.*]] = alloca i32*, align 8
27799 // CHECK13-NEXT:    [[N:%.*]] = alloca i32, align 4
27800 // CHECK13-NEXT:    [[CH:%.*]] = alloca i32, align 4
27801 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27802 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27803 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27804 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27805 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27806 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
27807 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27808 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
27809 // CHECK13-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
27810 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
27811 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
27812 // CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
27813 // CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
27814 // CHECK13-NEXT:    [[I23:%.*]] = alloca i32, align 4
27815 // CHECK13-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
27816 // CHECK13-NEXT:    [[I27:%.*]] = alloca i32, align 4
27817 // CHECK13-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
27818 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
27819 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
27820 // CHECK13-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
27821 // CHECK13-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
27822 // CHECK13-NEXT:    [[I57:%.*]] = alloca i32, align 4
27823 // CHECK13-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
27824 // CHECK13-NEXT:    [[I61:%.*]] = alloca i32, align 4
27825 // CHECK13-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
27826 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
27827 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
27828 // CHECK13-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
27829 // CHECK13-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
27830 // CHECK13-NEXT:    [[I91:%.*]] = alloca i32, align 4
27831 // CHECK13-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
27832 // CHECK13-NEXT:    [[I95:%.*]] = alloca i32, align 4
27833 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
27834 // CHECK13-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
27835 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
27836 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
27837 // CHECK13-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
27838 // CHECK13-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
27839 // CHECK13-NEXT:    [[I126:%.*]] = alloca i32, align 4
27840 // CHECK13-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
27841 // CHECK13-NEXT:    [[I130:%.*]] = alloca i32, align 4
27842 // CHECK13-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
27843 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
27844 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
27845 // CHECK13-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
27846 // CHECK13-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
27847 // CHECK13-NEXT:    [[I160:%.*]] = alloca i32, align 4
27848 // CHECK13-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
27849 // CHECK13-NEXT:    [[I164:%.*]] = alloca i32, align 4
27850 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
27851 // CHECK13-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
27852 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
27853 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
27854 // CHECK13-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
27855 // CHECK13-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
27856 // CHECK13-NEXT:    [[I195:%.*]] = alloca i32, align 4
27857 // CHECK13-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
27858 // CHECK13-NEXT:    [[I199:%.*]] = alloca i32, align 4
27859 // CHECK13-NEXT:    store i32 10000, i32* [[N]], align 4
27860 // CHECK13-NEXT:    store i32 100, i32* [[CH]], align 4
27861 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
27862 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
27863 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27864 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
27865 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27866 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
27867 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27868 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27869 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27870 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
27871 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
27872 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27873 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
27874 // CHECK13-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
27875 // CHECK13:       simd.if.then:
27876 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27877 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
27878 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27879 // CHECK13:       omp.inner.for.cond:
27880 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27881 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
27882 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
27883 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27884 // CHECK13:       omp.inner.for.body:
27885 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27886 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
27887 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27888 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !24
27889 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !24
27890 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
27891 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
27892 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM]]
27893 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
27894 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !24
27895 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
27896 // CHECK13-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
27897 // CHECK13-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[IDXPROM5]]
27898 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !24
27899 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
27900 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !24
27901 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
27902 // CHECK13-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
27903 // CHECK13-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i64 [[IDXPROM8]]
27904 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX9]], align 4, !llvm.access.group !24
27905 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27906 // CHECK13:       omp.body.continue:
27907 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27908 // CHECK13:       omp.inner.for.inc:
27909 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27910 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
27911 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27912 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
27913 // CHECK13:       omp.inner.for.end:
27914 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27915 // CHECK13-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
27916 // CHECK13-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
27917 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
27918 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
27919 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
27920 // CHECK13-NEXT:    br label [[SIMD_IF_END]]
27921 // CHECK13:       simd.if.end:
27922 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
27923 // CHECK13-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
27924 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27925 // CHECK13-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
27926 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
27927 // CHECK13-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
27928 // CHECK13-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
27929 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
27930 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
27931 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
27932 // CHECK13-NEXT:    store i32 0, i32* [[I23]], align 4
27933 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27934 // CHECK13-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
27935 // CHECK13-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
27936 // CHECK13:       simd.if.then25:
27937 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
27938 // CHECK13-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
27939 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
27940 // CHECK13:       omp.inner.for.cond28:
27941 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27942 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !27
27943 // CHECK13-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
27944 // CHECK13-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
27945 // CHECK13:       omp.inner.for.body30:
27946 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27947 // CHECK13-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
27948 // CHECK13-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
27949 // CHECK13-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !27
27950 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !27
27951 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
27952 // CHECK13-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
27953 // CHECK13-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM33]]
27954 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX34]], align 4, !llvm.access.group !27
27955 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !27
27956 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
27957 // CHECK13-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
27958 // CHECK13-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM35]]
27959 // CHECK13-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4, !llvm.access.group !27
27960 // CHECK13-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
27961 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !27
27962 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
27963 // CHECK13-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
27964 // CHECK13-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i64 [[IDXPROM38]]
27965 // CHECK13-NEXT:    store i32 [[ADD37]], i32* [[ARRAYIDX39]], align 4, !llvm.access.group !27
27966 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
27967 // CHECK13:       omp.body.continue40:
27968 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
27969 // CHECK13:       omp.inner.for.inc41:
27970 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27971 // CHECK13-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
27972 // CHECK13-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27973 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP28:![0-9]+]]
27974 // CHECK13:       omp.inner.for.end43:
27975 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27976 // CHECK13-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
27977 // CHECK13-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
27978 // CHECK13-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
27979 // CHECK13-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
27980 // CHECK13-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
27981 // CHECK13-NEXT:    br label [[SIMD_IF_END48]]
27982 // CHECK13:       simd.if.end48:
27983 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
27984 // CHECK13-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
27985 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27986 // CHECK13-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
27987 // CHECK13-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
27988 // CHECK13-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
27989 // CHECK13-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
27990 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
27991 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
27992 // CHECK13-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
27993 // CHECK13-NEXT:    store i32 0, i32* [[I57]], align 4
27994 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27995 // CHECK13-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
27996 // CHECK13-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
27997 // CHECK13:       simd.if.then59:
27998 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
27999 // CHECK13-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
28000 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
28001 // CHECK13:       omp.inner.for.cond62:
28002 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28003 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !30
28004 // CHECK13-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
28005 // CHECK13-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
28006 // CHECK13:       omp.inner.for.body64:
28007 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28008 // CHECK13-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
28009 // CHECK13-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
28010 // CHECK13-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !30
28011 // CHECK13-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !30
28012 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
28013 // CHECK13-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
28014 // CHECK13-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i64 [[IDXPROM67]]
28015 // CHECK13-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4, !llvm.access.group !30
28016 // CHECK13-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !30
28017 // CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
28018 // CHECK13-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
28019 // CHECK13-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i64 [[IDXPROM69]]
28020 // CHECK13-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX70]], align 4, !llvm.access.group !30
28021 // CHECK13-NEXT:    [[ADD71:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
28022 // CHECK13-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !30
28023 // CHECK13-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
28024 // CHECK13-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
28025 // CHECK13-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i64 [[IDXPROM72]]
28026 // CHECK13-NEXT:    store i32 [[ADD71]], i32* [[ARRAYIDX73]], align 4, !llvm.access.group !30
28027 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
28028 // CHECK13:       omp.body.continue74:
28029 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
28030 // CHECK13:       omp.inner.for.inc75:
28031 // CHECK13-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28032 // CHECK13-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
28033 // CHECK13-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28034 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP31:![0-9]+]]
28035 // CHECK13:       omp.inner.for.end77:
28036 // CHECK13-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28037 // CHECK13-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
28038 // CHECK13-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
28039 // CHECK13-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
28040 // CHECK13-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
28041 // CHECK13-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
28042 // CHECK13-NEXT:    br label [[SIMD_IF_END82]]
28043 // CHECK13:       simd.if.end82:
28044 // CHECK13-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
28045 // CHECK13-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
28046 // CHECK13-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28047 // CHECK13-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
28048 // CHECK13-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
28049 // CHECK13-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
28050 // CHECK13-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
28051 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
28052 // CHECK13-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
28053 // CHECK13-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
28054 // CHECK13-NEXT:    store i32 0, i32* [[I91]], align 4
28055 // CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28056 // CHECK13-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
28057 // CHECK13-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
28058 // CHECK13:       simd.if.then93:
28059 // CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
28060 // CHECK13-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
28061 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
28062 // CHECK13:       omp.inner.for.cond96:
28063 // CHECK13-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28064 // CHECK13-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !33
28065 // CHECK13-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
28066 // CHECK13-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
28067 // CHECK13:       omp.inner.for.body98:
28068 // CHECK13-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28069 // CHECK13-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
28070 // CHECK13-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
28071 // CHECK13-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !33
28072 // CHECK13-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !33
28073 // CHECK13-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
28074 // CHECK13-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
28075 // CHECK13-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i64 [[IDXPROM101]]
28076 // CHECK13-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX102]], align 4, !llvm.access.group !33
28077 // CHECK13-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !33
28078 // CHECK13-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
28079 // CHECK13-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
28080 // CHECK13-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i64 [[IDXPROM103]]
28081 // CHECK13-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX104]], align 4, !llvm.access.group !33
28082 // CHECK13-NEXT:    [[ADD105:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
28083 // CHECK13-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !33
28084 // CHECK13-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
28085 // CHECK13-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
28086 // CHECK13-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i64 [[IDXPROM106]]
28087 // CHECK13-NEXT:    store i32 [[ADD105]], i32* [[ARRAYIDX107]], align 4, !llvm.access.group !33
28088 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
28089 // CHECK13:       omp.body.continue108:
28090 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
28091 // CHECK13:       omp.inner.for.inc109:
28092 // CHECK13-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28093 // CHECK13-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
28094 // CHECK13-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28095 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP34:![0-9]+]]
28096 // CHECK13:       omp.inner.for.end111:
28097 // CHECK13-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28098 // CHECK13-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
28099 // CHECK13-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
28100 // CHECK13-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
28101 // CHECK13-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
28102 // CHECK13-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
28103 // CHECK13-NEXT:    br label [[SIMD_IF_END116]]
28104 // CHECK13:       simd.if.end116:
28105 // CHECK13-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
28106 // CHECK13-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
28107 // CHECK13-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
28108 // CHECK13-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
28109 // CHECK13-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28110 // CHECK13-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
28111 // CHECK13-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
28112 // CHECK13-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
28113 // CHECK13-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
28114 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
28115 // CHECK13-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
28116 // CHECK13-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
28117 // CHECK13-NEXT:    store i32 0, i32* [[I126]], align 4
28118 // CHECK13-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28119 // CHECK13-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
28120 // CHECK13-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
28121 // CHECK13:       simd.if.then128:
28122 // CHECK13-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
28123 // CHECK13-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
28124 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
28125 // CHECK13:       omp.inner.for.cond131:
28126 // CHECK13-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28127 // CHECK13-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !36
28128 // CHECK13-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
28129 // CHECK13-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
28130 // CHECK13:       omp.inner.for.body133:
28131 // CHECK13-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28132 // CHECK13-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
28133 // CHECK13-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
28134 // CHECK13-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !36
28135 // CHECK13-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !36
28136 // CHECK13-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
28137 // CHECK13-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
28138 // CHECK13-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i64 [[IDXPROM136]]
28139 // CHECK13-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX137]], align 4, !llvm.access.group !36
28140 // CHECK13-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !36
28141 // CHECK13-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
28142 // CHECK13-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
28143 // CHECK13-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i64 [[IDXPROM138]]
28144 // CHECK13-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX139]], align 4, !llvm.access.group !36
28145 // CHECK13-NEXT:    [[ADD140:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
28146 // CHECK13-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !36
28147 // CHECK13-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
28148 // CHECK13-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
28149 // CHECK13-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i64 [[IDXPROM141]]
28150 // CHECK13-NEXT:    store i32 [[ADD140]], i32* [[ARRAYIDX142]], align 4, !llvm.access.group !36
28151 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
28152 // CHECK13:       omp.body.continue143:
28153 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
28154 // CHECK13:       omp.inner.for.inc144:
28155 // CHECK13-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28156 // CHECK13-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
28157 // CHECK13-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28158 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP37:![0-9]+]]
28159 // CHECK13:       omp.inner.for.end146:
28160 // CHECK13-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28161 // CHECK13-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
28162 // CHECK13-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
28163 // CHECK13-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
28164 // CHECK13-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
28165 // CHECK13-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
28166 // CHECK13-NEXT:    br label [[SIMD_IF_END151]]
28167 // CHECK13:       simd.if.end151:
28168 // CHECK13-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
28169 // CHECK13-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
28170 // CHECK13-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28171 // CHECK13-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
28172 // CHECK13-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
28173 // CHECK13-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
28174 // CHECK13-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
28175 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
28176 // CHECK13-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
28177 // CHECK13-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
28178 // CHECK13-NEXT:    store i32 0, i32* [[I160]], align 4
28179 // CHECK13-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28180 // CHECK13-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
28181 // CHECK13-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
28182 // CHECK13:       simd.if.then162:
28183 // CHECK13-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
28184 // CHECK13-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
28185 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
28186 // CHECK13:       omp.inner.for.cond165:
28187 // CHECK13-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28188 // CHECK13-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !39
28189 // CHECK13-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
28190 // CHECK13-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
28191 // CHECK13:       omp.inner.for.body167:
28192 // CHECK13-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28193 // CHECK13-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
28194 // CHECK13-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
28195 // CHECK13-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !39
28196 // CHECK13-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !39
28197 // CHECK13-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
28198 // CHECK13-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
28199 // CHECK13-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i64 [[IDXPROM170]]
28200 // CHECK13-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX171]], align 4, !llvm.access.group !39
28201 // CHECK13-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !39
28202 // CHECK13-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
28203 // CHECK13-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
28204 // CHECK13-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i64 [[IDXPROM172]]
28205 // CHECK13-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX173]], align 4, !llvm.access.group !39
28206 // CHECK13-NEXT:    [[ADD174:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
28207 // CHECK13-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !39
28208 // CHECK13-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
28209 // CHECK13-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
28210 // CHECK13-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i64 [[IDXPROM175]]
28211 // CHECK13-NEXT:    store i32 [[ADD174]], i32* [[ARRAYIDX176]], align 4, !llvm.access.group !39
28212 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
28213 // CHECK13:       omp.body.continue177:
28214 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
28215 // CHECK13:       omp.inner.for.inc178:
28216 // CHECK13-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28217 // CHECK13-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
28218 // CHECK13-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28219 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP40:![0-9]+]]
28220 // CHECK13:       omp.inner.for.end180:
28221 // CHECK13-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28222 // CHECK13-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
28223 // CHECK13-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
28224 // CHECK13-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
28225 // CHECK13-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
28226 // CHECK13-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
28227 // CHECK13-NEXT:    br label [[SIMD_IF_END185]]
28228 // CHECK13:       simd.if.end185:
28229 // CHECK13-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
28230 // CHECK13-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
28231 // CHECK13-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
28232 // CHECK13-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
28233 // CHECK13-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28234 // CHECK13-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
28235 // CHECK13-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
28236 // CHECK13-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
28237 // CHECK13-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
28238 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
28239 // CHECK13-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
28240 // CHECK13-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
28241 // CHECK13-NEXT:    store i32 0, i32* [[I195]], align 4
28242 // CHECK13-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28243 // CHECK13-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
28244 // CHECK13-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
28245 // CHECK13:       simd.if.then197:
28246 // CHECK13-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
28247 // CHECK13-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
28248 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
28249 // CHECK13:       omp.inner.for.cond200:
28250 // CHECK13-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28251 // CHECK13-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !42
28252 // CHECK13-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
28253 // CHECK13-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
28254 // CHECK13:       omp.inner.for.body202:
28255 // CHECK13-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28256 // CHECK13-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
28257 // CHECK13-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
28258 // CHECK13-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !42
28259 // CHECK13-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !42
28260 // CHECK13-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
28261 // CHECK13-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
28262 // CHECK13-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i64 [[IDXPROM205]]
28263 // CHECK13-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX206]], align 4, !llvm.access.group !42
28264 // CHECK13-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !42
28265 // CHECK13-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
28266 // CHECK13-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
28267 // CHECK13-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i64 [[IDXPROM207]]
28268 // CHECK13-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX208]], align 4, !llvm.access.group !42
28269 // CHECK13-NEXT:    [[ADD209:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
28270 // CHECK13-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !42
28271 // CHECK13-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
28272 // CHECK13-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
28273 // CHECK13-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i64 [[IDXPROM210]]
28274 // CHECK13-NEXT:    store i32 [[ADD209]], i32* [[ARRAYIDX211]], align 4, !llvm.access.group !42
28275 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
28276 // CHECK13:       omp.body.continue212:
28277 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
28278 // CHECK13:       omp.inner.for.inc213:
28279 // CHECK13-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28280 // CHECK13-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
28281 // CHECK13-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28282 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP43:![0-9]+]]
28283 // CHECK13:       omp.inner.for.end215:
28284 // CHECK13-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28285 // CHECK13-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
28286 // CHECK13-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
28287 // CHECK13-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
28288 // CHECK13-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
28289 // CHECK13-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
28290 // CHECK13-NEXT:    br label [[SIMD_IF_END220]]
28291 // CHECK13:       simd.if.end220:
28292 // CHECK13-NEXT:    ret i32 0
28293 //
28294 //
28295 // CHECK14-LABEL: define {{[^@]+}}@main
28296 // CHECK14-SAME: () #[[ATTR0:[0-9]+]] {
28297 // CHECK14-NEXT:  entry:
28298 // CHECK14-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
28299 // CHECK14-NEXT:    [[A:%.*]] = alloca double*, align 8
28300 // CHECK14-NEXT:    [[B:%.*]] = alloca double*, align 8
28301 // CHECK14-NEXT:    [[C:%.*]] = alloca double*, align 8
28302 // CHECK14-NEXT:    [[N:%.*]] = alloca i32, align 4
28303 // CHECK14-NEXT:    [[CH:%.*]] = alloca i32, align 4
28304 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28305 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28306 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28307 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28308 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28309 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
28310 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28311 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
28312 // CHECK14-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
28313 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
28314 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
28315 // CHECK14-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
28316 // CHECK14-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
28317 // CHECK14-NEXT:    [[I23:%.*]] = alloca i32, align 4
28318 // CHECK14-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
28319 // CHECK14-NEXT:    [[I27:%.*]] = alloca i32, align 4
28320 // CHECK14-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
28321 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
28322 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
28323 // CHECK14-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
28324 // CHECK14-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
28325 // CHECK14-NEXT:    [[I57:%.*]] = alloca i32, align 4
28326 // CHECK14-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
28327 // CHECK14-NEXT:    [[I61:%.*]] = alloca i32, align 4
28328 // CHECK14-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
28329 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
28330 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
28331 // CHECK14-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
28332 // CHECK14-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
28333 // CHECK14-NEXT:    [[I91:%.*]] = alloca i32, align 4
28334 // CHECK14-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
28335 // CHECK14-NEXT:    [[I95:%.*]] = alloca i32, align 4
28336 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
28337 // CHECK14-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
28338 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
28339 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
28340 // CHECK14-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
28341 // CHECK14-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
28342 // CHECK14-NEXT:    [[I126:%.*]] = alloca i32, align 4
28343 // CHECK14-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
28344 // CHECK14-NEXT:    [[I130:%.*]] = alloca i32, align 4
28345 // CHECK14-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
28346 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
28347 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
28348 // CHECK14-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
28349 // CHECK14-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
28350 // CHECK14-NEXT:    [[I160:%.*]] = alloca i32, align 4
28351 // CHECK14-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
28352 // CHECK14-NEXT:    [[I164:%.*]] = alloca i32, align 4
28353 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
28354 // CHECK14-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
28355 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
28356 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
28357 // CHECK14-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
28358 // CHECK14-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
28359 // CHECK14-NEXT:    [[I195:%.*]] = alloca i32, align 4
28360 // CHECK14-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
28361 // CHECK14-NEXT:    [[I199:%.*]] = alloca i32, align 4
28362 // CHECK14-NEXT:    store i32 0, i32* [[RETVAL]], align 4
28363 // CHECK14-NEXT:    store i32 10000, i32* [[N]], align 4
28364 // CHECK14-NEXT:    store i32 100, i32* [[CH]], align 4
28365 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
28366 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
28367 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28368 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
28369 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28370 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28371 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28372 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28373 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28374 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
28375 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
28376 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28377 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
28378 // CHECK14-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
28379 // CHECK14:       simd.if.then:
28380 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28381 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
28382 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28383 // CHECK14:       omp.inner.for.cond:
28384 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28385 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
28386 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
28387 // CHECK14-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28388 // CHECK14:       omp.inner.for.body:
28389 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28390 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
28391 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28392 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
28393 // CHECK14-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !2
28394 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
28395 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
28396 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i64 [[IDXPROM]]
28397 // CHECK14-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !2
28398 // CHECK14-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !2
28399 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
28400 // CHECK14-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
28401 // CHECK14-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP11]], i64 [[IDXPROM5]]
28402 // CHECK14-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX6]], align 8, !llvm.access.group !2
28403 // CHECK14-NEXT:    [[ADD7:%.*]] = fadd double [[TMP10]], [[TMP13]]
28404 // CHECK14-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !2
28405 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
28406 // CHECK14-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
28407 // CHECK14-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP14]], i64 [[IDXPROM8]]
28408 // CHECK14-NEXT:    store double [[ADD7]], double* [[ARRAYIDX9]], align 8, !llvm.access.group !2
28409 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28410 // CHECK14:       omp.body.continue:
28411 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28412 // CHECK14:       omp.inner.for.inc:
28413 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28414 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
28415 // CHECK14-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28416 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
28417 // CHECK14:       omp.inner.for.end:
28418 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28419 // CHECK14-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
28420 // CHECK14-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
28421 // CHECK14-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
28422 // CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
28423 // CHECK14-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
28424 // CHECK14-NEXT:    br label [[SIMD_IF_END]]
28425 // CHECK14:       simd.if.end:
28426 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
28427 // CHECK14-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
28428 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28429 // CHECK14-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
28430 // CHECK14-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
28431 // CHECK14-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
28432 // CHECK14-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
28433 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
28434 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
28435 // CHECK14-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
28436 // CHECK14-NEXT:    store i32 0, i32* [[I23]], align 4
28437 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28438 // CHECK14-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
28439 // CHECK14-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
28440 // CHECK14:       simd.if.then25:
28441 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
28442 // CHECK14-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
28443 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
28444 // CHECK14:       omp.inner.for.cond28:
28445 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28446 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !6
28447 // CHECK14-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
28448 // CHECK14-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
28449 // CHECK14:       omp.inner.for.body30:
28450 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28451 // CHECK14-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
28452 // CHECK14-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
28453 // CHECK14-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !6
28454 // CHECK14-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !6
28455 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
28456 // CHECK14-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
28457 // CHECK14-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM33]]
28458 // CHECK14-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX34]], align 8, !llvm.access.group !6
28459 // CHECK14-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !6
28460 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
28461 // CHECK14-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
28462 // CHECK14-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM35]]
28463 // CHECK14-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX36]], align 8, !llvm.access.group !6
28464 // CHECK14-NEXT:    [[ADD37:%.*]] = fadd double [[TMP28]], [[TMP31]]
28465 // CHECK14-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !6
28466 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
28467 // CHECK14-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
28468 // CHECK14-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds double, double* [[TMP32]], i64 [[IDXPROM38]]
28469 // CHECK14-NEXT:    store double [[ADD37]], double* [[ARRAYIDX39]], align 8, !llvm.access.group !6
28470 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
28471 // CHECK14:       omp.body.continue40:
28472 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
28473 // CHECK14:       omp.inner.for.inc41:
28474 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28475 // CHECK14-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
28476 // CHECK14-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28477 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP7:![0-9]+]]
28478 // CHECK14:       omp.inner.for.end43:
28479 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28480 // CHECK14-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
28481 // CHECK14-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
28482 // CHECK14-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
28483 // CHECK14-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
28484 // CHECK14-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
28485 // CHECK14-NEXT:    br label [[SIMD_IF_END48]]
28486 // CHECK14:       simd.if.end48:
28487 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
28488 // CHECK14-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
28489 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28490 // CHECK14-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
28491 // CHECK14-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
28492 // CHECK14-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
28493 // CHECK14-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
28494 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
28495 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
28496 // CHECK14-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
28497 // CHECK14-NEXT:    store i32 0, i32* [[I57]], align 4
28498 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28499 // CHECK14-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
28500 // CHECK14-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
28501 // CHECK14:       simd.if.then59:
28502 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
28503 // CHECK14-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
28504 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
28505 // CHECK14:       omp.inner.for.cond62:
28506 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28507 // CHECK14-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !9
28508 // CHECK14-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
28509 // CHECK14-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
28510 // CHECK14:       omp.inner.for.body64:
28511 // CHECK14-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28512 // CHECK14-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
28513 // CHECK14-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
28514 // CHECK14-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !9
28515 // CHECK14-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !9
28516 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
28517 // CHECK14-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
28518 // CHECK14-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds double, double* [[TMP44]], i64 [[IDXPROM67]]
28519 // CHECK14-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX68]], align 8, !llvm.access.group !9
28520 // CHECK14-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !9
28521 // CHECK14-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
28522 // CHECK14-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
28523 // CHECK14-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds double, double* [[TMP47]], i64 [[IDXPROM69]]
28524 // CHECK14-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX70]], align 8, !llvm.access.group !9
28525 // CHECK14-NEXT:    [[ADD71:%.*]] = fadd double [[TMP46]], [[TMP49]]
28526 // CHECK14-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !9
28527 // CHECK14-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
28528 // CHECK14-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
28529 // CHECK14-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds double, double* [[TMP50]], i64 [[IDXPROM72]]
28530 // CHECK14-NEXT:    store double [[ADD71]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !9
28531 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
28532 // CHECK14:       omp.body.continue74:
28533 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
28534 // CHECK14:       omp.inner.for.inc75:
28535 // CHECK14-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28536 // CHECK14-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
28537 // CHECK14-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28538 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP10:![0-9]+]]
28539 // CHECK14:       omp.inner.for.end77:
28540 // CHECK14-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28541 // CHECK14-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
28542 // CHECK14-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
28543 // CHECK14-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
28544 // CHECK14-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
28545 // CHECK14-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
28546 // CHECK14-NEXT:    br label [[SIMD_IF_END82]]
28547 // CHECK14:       simd.if.end82:
28548 // CHECK14-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
28549 // CHECK14-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
28550 // CHECK14-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28551 // CHECK14-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
28552 // CHECK14-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
28553 // CHECK14-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
28554 // CHECK14-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
28555 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
28556 // CHECK14-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
28557 // CHECK14-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
28558 // CHECK14-NEXT:    store i32 0, i32* [[I91]], align 4
28559 // CHECK14-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28560 // CHECK14-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
28561 // CHECK14-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
28562 // CHECK14:       simd.if.then93:
28563 // CHECK14-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
28564 // CHECK14-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
28565 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
28566 // CHECK14:       omp.inner.for.cond96:
28567 // CHECK14-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28568 // CHECK14-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !12
28569 // CHECK14-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
28570 // CHECK14-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
28571 // CHECK14:       omp.inner.for.body98:
28572 // CHECK14-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28573 // CHECK14-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
28574 // CHECK14-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
28575 // CHECK14-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !12
28576 // CHECK14-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !12
28577 // CHECK14-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
28578 // CHECK14-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
28579 // CHECK14-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds double, double* [[TMP62]], i64 [[IDXPROM101]]
28580 // CHECK14-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX102]], align 8, !llvm.access.group !12
28581 // CHECK14-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !12
28582 // CHECK14-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
28583 // CHECK14-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
28584 // CHECK14-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds double, double* [[TMP65]], i64 [[IDXPROM103]]
28585 // CHECK14-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX104]], align 8, !llvm.access.group !12
28586 // CHECK14-NEXT:    [[ADD105:%.*]] = fadd double [[TMP64]], [[TMP67]]
28587 // CHECK14-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !12
28588 // CHECK14-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
28589 // CHECK14-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
28590 // CHECK14-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds double, double* [[TMP68]], i64 [[IDXPROM106]]
28591 // CHECK14-NEXT:    store double [[ADD105]], double* [[ARRAYIDX107]], align 8, !llvm.access.group !12
28592 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
28593 // CHECK14:       omp.body.continue108:
28594 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
28595 // CHECK14:       omp.inner.for.inc109:
28596 // CHECK14-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28597 // CHECK14-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
28598 // CHECK14-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28599 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP13:![0-9]+]]
28600 // CHECK14:       omp.inner.for.end111:
28601 // CHECK14-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28602 // CHECK14-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
28603 // CHECK14-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
28604 // CHECK14-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
28605 // CHECK14-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
28606 // CHECK14-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
28607 // CHECK14-NEXT:    br label [[SIMD_IF_END116]]
28608 // CHECK14:       simd.if.end116:
28609 // CHECK14-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
28610 // CHECK14-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
28611 // CHECK14-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
28612 // CHECK14-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
28613 // CHECK14-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28614 // CHECK14-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
28615 // CHECK14-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
28616 // CHECK14-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
28617 // CHECK14-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
28618 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
28619 // CHECK14-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
28620 // CHECK14-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
28621 // CHECK14-NEXT:    store i32 0, i32* [[I126]], align 4
28622 // CHECK14-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28623 // CHECK14-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
28624 // CHECK14-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
28625 // CHECK14:       simd.if.then128:
28626 // CHECK14-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
28627 // CHECK14-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
28628 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
28629 // CHECK14:       omp.inner.for.cond131:
28630 // CHECK14-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28631 // CHECK14-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !15
28632 // CHECK14-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
28633 // CHECK14-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
28634 // CHECK14:       omp.inner.for.body133:
28635 // CHECK14-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28636 // CHECK14-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
28637 // CHECK14-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
28638 // CHECK14-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !15
28639 // CHECK14-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !15
28640 // CHECK14-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
28641 // CHECK14-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
28642 // CHECK14-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds double, double* [[TMP81]], i64 [[IDXPROM136]]
28643 // CHECK14-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX137]], align 8, !llvm.access.group !15
28644 // CHECK14-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !15
28645 // CHECK14-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
28646 // CHECK14-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
28647 // CHECK14-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds double, double* [[TMP84]], i64 [[IDXPROM138]]
28648 // CHECK14-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX139]], align 8, !llvm.access.group !15
28649 // CHECK14-NEXT:    [[ADD140:%.*]] = fadd double [[TMP83]], [[TMP86]]
28650 // CHECK14-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !15
28651 // CHECK14-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
28652 // CHECK14-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
28653 // CHECK14-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds double, double* [[TMP87]], i64 [[IDXPROM141]]
28654 // CHECK14-NEXT:    store double [[ADD140]], double* [[ARRAYIDX142]], align 8, !llvm.access.group !15
28655 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
28656 // CHECK14:       omp.body.continue143:
28657 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
28658 // CHECK14:       omp.inner.for.inc144:
28659 // CHECK14-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28660 // CHECK14-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
28661 // CHECK14-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28662 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP16:![0-9]+]]
28663 // CHECK14:       omp.inner.for.end146:
28664 // CHECK14-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28665 // CHECK14-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
28666 // CHECK14-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
28667 // CHECK14-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
28668 // CHECK14-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
28669 // CHECK14-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
28670 // CHECK14-NEXT:    br label [[SIMD_IF_END151]]
28671 // CHECK14:       simd.if.end151:
28672 // CHECK14-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
28673 // CHECK14-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
28674 // CHECK14-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28675 // CHECK14-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
28676 // CHECK14-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
28677 // CHECK14-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
28678 // CHECK14-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
28679 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
28680 // CHECK14-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
28681 // CHECK14-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
28682 // CHECK14-NEXT:    store i32 0, i32* [[I160]], align 4
28683 // CHECK14-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28684 // CHECK14-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
28685 // CHECK14-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
28686 // CHECK14:       simd.if.then162:
28687 // CHECK14-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
28688 // CHECK14-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
28689 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
28690 // CHECK14:       omp.inner.for.cond165:
28691 // CHECK14-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28692 // CHECK14-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !18
28693 // CHECK14-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
28694 // CHECK14-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
28695 // CHECK14:       omp.inner.for.body167:
28696 // CHECK14-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28697 // CHECK14-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
28698 // CHECK14-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
28699 // CHECK14-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !18
28700 // CHECK14-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !18
28701 // CHECK14-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
28702 // CHECK14-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
28703 // CHECK14-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds double, double* [[TMP99]], i64 [[IDXPROM170]]
28704 // CHECK14-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX171]], align 8, !llvm.access.group !18
28705 // CHECK14-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !18
28706 // CHECK14-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
28707 // CHECK14-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
28708 // CHECK14-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds double, double* [[TMP102]], i64 [[IDXPROM172]]
28709 // CHECK14-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX173]], align 8, !llvm.access.group !18
28710 // CHECK14-NEXT:    [[ADD174:%.*]] = fadd double [[TMP101]], [[TMP104]]
28711 // CHECK14-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !18
28712 // CHECK14-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
28713 // CHECK14-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
28714 // CHECK14-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds double, double* [[TMP105]], i64 [[IDXPROM175]]
28715 // CHECK14-NEXT:    store double [[ADD174]], double* [[ARRAYIDX176]], align 8, !llvm.access.group !18
28716 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
28717 // CHECK14:       omp.body.continue177:
28718 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
28719 // CHECK14:       omp.inner.for.inc178:
28720 // CHECK14-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28721 // CHECK14-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
28722 // CHECK14-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28723 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP19:![0-9]+]]
28724 // CHECK14:       omp.inner.for.end180:
28725 // CHECK14-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28726 // CHECK14-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
28727 // CHECK14-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
28728 // CHECK14-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
28729 // CHECK14-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
28730 // CHECK14-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
28731 // CHECK14-NEXT:    br label [[SIMD_IF_END185]]
28732 // CHECK14:       simd.if.end185:
28733 // CHECK14-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
28734 // CHECK14-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
28735 // CHECK14-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
28736 // CHECK14-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
28737 // CHECK14-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28738 // CHECK14-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
28739 // CHECK14-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
28740 // CHECK14-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
28741 // CHECK14-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
28742 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
28743 // CHECK14-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
28744 // CHECK14-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
28745 // CHECK14-NEXT:    store i32 0, i32* [[I195]], align 4
28746 // CHECK14-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28747 // CHECK14-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
28748 // CHECK14-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
28749 // CHECK14:       simd.if.then197:
28750 // CHECK14-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
28751 // CHECK14-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
28752 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
28753 // CHECK14:       omp.inner.for.cond200:
28754 // CHECK14-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28755 // CHECK14-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !21
28756 // CHECK14-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
28757 // CHECK14-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
28758 // CHECK14:       omp.inner.for.body202:
28759 // CHECK14-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28760 // CHECK14-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
28761 // CHECK14-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
28762 // CHECK14-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !21
28763 // CHECK14-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !21
28764 // CHECK14-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
28765 // CHECK14-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
28766 // CHECK14-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds double, double* [[TMP118]], i64 [[IDXPROM205]]
28767 // CHECK14-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX206]], align 8, !llvm.access.group !21
28768 // CHECK14-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !21
28769 // CHECK14-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
28770 // CHECK14-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
28771 // CHECK14-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds double, double* [[TMP121]], i64 [[IDXPROM207]]
28772 // CHECK14-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX208]], align 8, !llvm.access.group !21
28773 // CHECK14-NEXT:    [[ADD209:%.*]] = fadd double [[TMP120]], [[TMP123]]
28774 // CHECK14-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !21
28775 // CHECK14-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
28776 // CHECK14-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
28777 // CHECK14-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds double, double* [[TMP124]], i64 [[IDXPROM210]]
28778 // CHECK14-NEXT:    store double [[ADD209]], double* [[ARRAYIDX211]], align 8, !llvm.access.group !21
28779 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
28780 // CHECK14:       omp.body.continue212:
28781 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
28782 // CHECK14:       omp.inner.for.inc213:
28783 // CHECK14-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28784 // CHECK14-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
28785 // CHECK14-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28786 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP22:![0-9]+]]
28787 // CHECK14:       omp.inner.for.end215:
28788 // CHECK14-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28789 // CHECK14-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
28790 // CHECK14-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
28791 // CHECK14-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
28792 // CHECK14-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
28793 // CHECK14-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
28794 // CHECK14-NEXT:    br label [[SIMD_IF_END220]]
28795 // CHECK14:       simd.if.end220:
28796 // CHECK14-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
28797 // CHECK14-NEXT:    ret i32 [[CALL]]
28798 //
28799 //
28800 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
28801 // CHECK14-SAME: () #[[ATTR1:[0-9]+]] comdat {
28802 // CHECK14-NEXT:  entry:
28803 // CHECK14-NEXT:    [[A:%.*]] = alloca i32*, align 8
28804 // CHECK14-NEXT:    [[B:%.*]] = alloca i32*, align 8
28805 // CHECK14-NEXT:    [[C:%.*]] = alloca i32*, align 8
28806 // CHECK14-NEXT:    [[N:%.*]] = alloca i32, align 4
28807 // CHECK14-NEXT:    [[CH:%.*]] = alloca i32, align 4
28808 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28809 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28810 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28811 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28812 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28813 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
28814 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28815 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
28816 // CHECK14-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
28817 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
28818 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
28819 // CHECK14-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
28820 // CHECK14-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
28821 // CHECK14-NEXT:    [[I23:%.*]] = alloca i32, align 4
28822 // CHECK14-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
28823 // CHECK14-NEXT:    [[I27:%.*]] = alloca i32, align 4
28824 // CHECK14-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
28825 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
28826 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
28827 // CHECK14-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
28828 // CHECK14-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
28829 // CHECK14-NEXT:    [[I57:%.*]] = alloca i32, align 4
28830 // CHECK14-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
28831 // CHECK14-NEXT:    [[I61:%.*]] = alloca i32, align 4
28832 // CHECK14-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
28833 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
28834 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
28835 // CHECK14-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
28836 // CHECK14-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
28837 // CHECK14-NEXT:    [[I91:%.*]] = alloca i32, align 4
28838 // CHECK14-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
28839 // CHECK14-NEXT:    [[I95:%.*]] = alloca i32, align 4
28840 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
28841 // CHECK14-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
28842 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
28843 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
28844 // CHECK14-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
28845 // CHECK14-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
28846 // CHECK14-NEXT:    [[I126:%.*]] = alloca i32, align 4
28847 // CHECK14-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
28848 // CHECK14-NEXT:    [[I130:%.*]] = alloca i32, align 4
28849 // CHECK14-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
28850 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
28851 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
28852 // CHECK14-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
28853 // CHECK14-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
28854 // CHECK14-NEXT:    [[I160:%.*]] = alloca i32, align 4
28855 // CHECK14-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
28856 // CHECK14-NEXT:    [[I164:%.*]] = alloca i32, align 4
28857 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
28858 // CHECK14-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
28859 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
28860 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
28861 // CHECK14-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
28862 // CHECK14-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
28863 // CHECK14-NEXT:    [[I195:%.*]] = alloca i32, align 4
28864 // CHECK14-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
28865 // CHECK14-NEXT:    [[I199:%.*]] = alloca i32, align 4
28866 // CHECK14-NEXT:    store i32 10000, i32* [[N]], align 4
28867 // CHECK14-NEXT:    store i32 100, i32* [[CH]], align 4
28868 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
28869 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
28870 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28871 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
28872 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28873 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28874 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28875 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28876 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28877 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
28878 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
28879 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28880 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
28881 // CHECK14-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
28882 // CHECK14:       simd.if.then:
28883 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28884 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
28885 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28886 // CHECK14:       omp.inner.for.cond:
28887 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28888 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
28889 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
28890 // CHECK14-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28891 // CHECK14:       omp.inner.for.body:
28892 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28893 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
28894 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28895 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !24
28896 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !24
28897 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
28898 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
28899 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM]]
28900 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
28901 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !24
28902 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
28903 // CHECK14-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
28904 // CHECK14-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[IDXPROM5]]
28905 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !24
28906 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
28907 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !24
28908 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
28909 // CHECK14-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
28910 // CHECK14-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i64 [[IDXPROM8]]
28911 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX9]], align 4, !llvm.access.group !24
28912 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28913 // CHECK14:       omp.body.continue:
28914 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28915 // CHECK14:       omp.inner.for.inc:
28916 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28917 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
28918 // CHECK14-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28919 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
28920 // CHECK14:       omp.inner.for.end:
28921 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28922 // CHECK14-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
28923 // CHECK14-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
28924 // CHECK14-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
28925 // CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
28926 // CHECK14-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
28927 // CHECK14-NEXT:    br label [[SIMD_IF_END]]
28928 // CHECK14:       simd.if.end:
28929 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
28930 // CHECK14-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
28931 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28932 // CHECK14-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
28933 // CHECK14-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
28934 // CHECK14-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
28935 // CHECK14-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
28936 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
28937 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
28938 // CHECK14-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
28939 // CHECK14-NEXT:    store i32 0, i32* [[I23]], align 4
28940 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28941 // CHECK14-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
28942 // CHECK14-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
28943 // CHECK14:       simd.if.then25:
28944 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
28945 // CHECK14-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
28946 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
28947 // CHECK14:       omp.inner.for.cond28:
28948 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28949 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !27
28950 // CHECK14-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
28951 // CHECK14-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
28952 // CHECK14:       omp.inner.for.body30:
28953 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28954 // CHECK14-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
28955 // CHECK14-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
28956 // CHECK14-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !27
28957 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !27
28958 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
28959 // CHECK14-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
28960 // CHECK14-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM33]]
28961 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX34]], align 4, !llvm.access.group !27
28962 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !27
28963 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
28964 // CHECK14-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
28965 // CHECK14-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM35]]
28966 // CHECK14-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4, !llvm.access.group !27
28967 // CHECK14-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
28968 // CHECK14-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !27
28969 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
28970 // CHECK14-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
28971 // CHECK14-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i64 [[IDXPROM38]]
28972 // CHECK14-NEXT:    store i32 [[ADD37]], i32* [[ARRAYIDX39]], align 4, !llvm.access.group !27
28973 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
28974 // CHECK14:       omp.body.continue40:
28975 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
28976 // CHECK14:       omp.inner.for.inc41:
28977 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28978 // CHECK14-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
28979 // CHECK14-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28980 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP28:![0-9]+]]
28981 // CHECK14:       omp.inner.for.end43:
28982 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28983 // CHECK14-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
28984 // CHECK14-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
28985 // CHECK14-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
28986 // CHECK14-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
28987 // CHECK14-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
28988 // CHECK14-NEXT:    br label [[SIMD_IF_END48]]
28989 // CHECK14:       simd.if.end48:
28990 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
28991 // CHECK14-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
28992 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28993 // CHECK14-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
28994 // CHECK14-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
28995 // CHECK14-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
28996 // CHECK14-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
28997 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
28998 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
28999 // CHECK14-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
29000 // CHECK14-NEXT:    store i32 0, i32* [[I57]], align 4
29001 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
29002 // CHECK14-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
29003 // CHECK14-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
29004 // CHECK14:       simd.if.then59:
29005 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
29006 // CHECK14-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
29007 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
29008 // CHECK14:       omp.inner.for.cond62:
29009 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29010 // CHECK14-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !30
29011 // CHECK14-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
29012 // CHECK14-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
29013 // CHECK14:       omp.inner.for.body64:
29014 // CHECK14-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29015 // CHECK14-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
29016 // CHECK14-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
29017 // CHECK14-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !30
29018 // CHECK14-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !30
29019 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
29020 // CHECK14-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
29021 // CHECK14-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i64 [[IDXPROM67]]
29022 // CHECK14-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4, !llvm.access.group !30
29023 // CHECK14-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !30
29024 // CHECK14-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
29025 // CHECK14-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
29026 // CHECK14-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i64 [[IDXPROM69]]
29027 // CHECK14-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX70]], align 4, !llvm.access.group !30
29028 // CHECK14-NEXT:    [[ADD71:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
29029 // CHECK14-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !30
29030 // CHECK14-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
29031 // CHECK14-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
29032 // CHECK14-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i64 [[IDXPROM72]]
29033 // CHECK14-NEXT:    store i32 [[ADD71]], i32* [[ARRAYIDX73]], align 4, !llvm.access.group !30
29034 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
29035 // CHECK14:       omp.body.continue74:
29036 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
29037 // CHECK14:       omp.inner.for.inc75:
29038 // CHECK14-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29039 // CHECK14-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
29040 // CHECK14-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29041 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP31:![0-9]+]]
29042 // CHECK14:       omp.inner.for.end77:
29043 // CHECK14-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
29044 // CHECK14-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
29045 // CHECK14-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
29046 // CHECK14-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
29047 // CHECK14-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
29048 // CHECK14-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
29049 // CHECK14-NEXT:    br label [[SIMD_IF_END82]]
29050 // CHECK14:       simd.if.end82:
29051 // CHECK14-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
29052 // CHECK14-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
29053 // CHECK14-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
29054 // CHECK14-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
29055 // CHECK14-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
29056 // CHECK14-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
29057 // CHECK14-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
29058 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
29059 // CHECK14-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
29060 // CHECK14-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
29061 // CHECK14-NEXT:    store i32 0, i32* [[I91]], align 4
29062 // CHECK14-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
29063 // CHECK14-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
29064 // CHECK14-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
29065 // CHECK14:       simd.if.then93:
29066 // CHECK14-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
29067 // CHECK14-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
29068 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
29069 // CHECK14:       omp.inner.for.cond96:
29070 // CHECK14-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29071 // CHECK14-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !33
29072 // CHECK14-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
29073 // CHECK14-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
29074 // CHECK14:       omp.inner.for.body98:
29075 // CHECK14-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29076 // CHECK14-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
29077 // CHECK14-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
29078 // CHECK14-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !33
29079 // CHECK14-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !33
29080 // CHECK14-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
29081 // CHECK14-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
29082 // CHECK14-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i64 [[IDXPROM101]]
29083 // CHECK14-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX102]], align 4, !llvm.access.group !33
29084 // CHECK14-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !33
29085 // CHECK14-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
29086 // CHECK14-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
29087 // CHECK14-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i64 [[IDXPROM103]]
29088 // CHECK14-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX104]], align 4, !llvm.access.group !33
29089 // CHECK14-NEXT:    [[ADD105:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
29090 // CHECK14-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !33
29091 // CHECK14-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
29092 // CHECK14-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
29093 // CHECK14-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i64 [[IDXPROM106]]
29094 // CHECK14-NEXT:    store i32 [[ADD105]], i32* [[ARRAYIDX107]], align 4, !llvm.access.group !33
29095 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
29096 // CHECK14:       omp.body.continue108:
29097 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
29098 // CHECK14:       omp.inner.for.inc109:
29099 // CHECK14-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29100 // CHECK14-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
29101 // CHECK14-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29102 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP34:![0-9]+]]
29103 // CHECK14:       omp.inner.for.end111:
29104 // CHECK14-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
29105 // CHECK14-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
29106 // CHECK14-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
29107 // CHECK14-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
29108 // CHECK14-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
29109 // CHECK14-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
29110 // CHECK14-NEXT:    br label [[SIMD_IF_END116]]
29111 // CHECK14:       simd.if.end116:
29112 // CHECK14-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
29113 // CHECK14-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
29114 // CHECK14-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
29115 // CHECK14-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
29116 // CHECK14-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
29117 // CHECK14-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
29118 // CHECK14-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
29119 // CHECK14-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
29120 // CHECK14-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
29121 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
29122 // CHECK14-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
29123 // CHECK14-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
29124 // CHECK14-NEXT:    store i32 0, i32* [[I126]], align 4
29125 // CHECK14-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
29126 // CHECK14-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
29127 // CHECK14-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
29128 // CHECK14:       simd.if.then128:
29129 // CHECK14-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
29130 // CHECK14-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
29131 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
29132 // CHECK14:       omp.inner.for.cond131:
29133 // CHECK14-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29134 // CHECK14-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !36
29135 // CHECK14-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
29136 // CHECK14-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
29137 // CHECK14:       omp.inner.for.body133:
29138 // CHECK14-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29139 // CHECK14-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
29140 // CHECK14-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
29141 // CHECK14-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !36
29142 // CHECK14-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !36
29143 // CHECK14-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
29144 // CHECK14-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
29145 // CHECK14-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i64 [[IDXPROM136]]
29146 // CHECK14-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX137]], align 4, !llvm.access.group !36
29147 // CHECK14-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !36
29148 // CHECK14-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
29149 // CHECK14-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
29150 // CHECK14-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i64 [[IDXPROM138]]
29151 // CHECK14-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX139]], align 4, !llvm.access.group !36
29152 // CHECK14-NEXT:    [[ADD140:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
29153 // CHECK14-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !36
29154 // CHECK14-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
29155 // CHECK14-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
29156 // CHECK14-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i64 [[IDXPROM141]]
29157 // CHECK14-NEXT:    store i32 [[ADD140]], i32* [[ARRAYIDX142]], align 4, !llvm.access.group !36
29158 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
29159 // CHECK14:       omp.body.continue143:
29160 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
29161 // CHECK14:       omp.inner.for.inc144:
29162 // CHECK14-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29163 // CHECK14-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
29164 // CHECK14-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29165 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP37:![0-9]+]]
29166 // CHECK14:       omp.inner.for.end146:
29167 // CHECK14-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
29168 // CHECK14-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
29169 // CHECK14-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
29170 // CHECK14-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
29171 // CHECK14-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
29172 // CHECK14-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
29173 // CHECK14-NEXT:    br label [[SIMD_IF_END151]]
29174 // CHECK14:       simd.if.end151:
29175 // CHECK14-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
29176 // CHECK14-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
29177 // CHECK14-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
29178 // CHECK14-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
29179 // CHECK14-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
29180 // CHECK14-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
29181 // CHECK14-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
29182 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
29183 // CHECK14-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
29184 // CHECK14-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
29185 // CHECK14-NEXT:    store i32 0, i32* [[I160]], align 4
29186 // CHECK14-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
29187 // CHECK14-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
29188 // CHECK14-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
29189 // CHECK14:       simd.if.then162:
29190 // CHECK14-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
29191 // CHECK14-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
29192 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
29193 // CHECK14:       omp.inner.for.cond165:
29194 // CHECK14-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29195 // CHECK14-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !39
29196 // CHECK14-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
29197 // CHECK14-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
29198 // CHECK14:       omp.inner.for.body167:
29199 // CHECK14-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29200 // CHECK14-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
29201 // CHECK14-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
29202 // CHECK14-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !39
29203 // CHECK14-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !39
29204 // CHECK14-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
29205 // CHECK14-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
29206 // CHECK14-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i64 [[IDXPROM170]]
29207 // CHECK14-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX171]], align 4, !llvm.access.group !39
29208 // CHECK14-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !39
29209 // CHECK14-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
29210 // CHECK14-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
29211 // CHECK14-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i64 [[IDXPROM172]]
29212 // CHECK14-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX173]], align 4, !llvm.access.group !39
29213 // CHECK14-NEXT:    [[ADD174:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
29214 // CHECK14-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !39
29215 // CHECK14-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
29216 // CHECK14-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
29217 // CHECK14-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i64 [[IDXPROM175]]
29218 // CHECK14-NEXT:    store i32 [[ADD174]], i32* [[ARRAYIDX176]], align 4, !llvm.access.group !39
29219 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
29220 // CHECK14:       omp.body.continue177:
29221 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
29222 // CHECK14:       omp.inner.for.inc178:
29223 // CHECK14-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29224 // CHECK14-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
29225 // CHECK14-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29226 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP40:![0-9]+]]
29227 // CHECK14:       omp.inner.for.end180:
29228 // CHECK14-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
29229 // CHECK14-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
29230 // CHECK14-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
29231 // CHECK14-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
29232 // CHECK14-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
29233 // CHECK14-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
29234 // CHECK14-NEXT:    br label [[SIMD_IF_END185]]
29235 // CHECK14:       simd.if.end185:
29236 // CHECK14-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
29237 // CHECK14-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
29238 // CHECK14-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
29239 // CHECK14-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
29240 // CHECK14-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
29241 // CHECK14-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
29242 // CHECK14-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
29243 // CHECK14-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
29244 // CHECK14-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
29245 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
29246 // CHECK14-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
29247 // CHECK14-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
29248 // CHECK14-NEXT:    store i32 0, i32* [[I195]], align 4
29249 // CHECK14-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
29250 // CHECK14-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
29251 // CHECK14-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
29252 // CHECK14:       simd.if.then197:
29253 // CHECK14-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
29254 // CHECK14-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
29255 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
29256 // CHECK14:       omp.inner.for.cond200:
29257 // CHECK14-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29258 // CHECK14-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !42
29259 // CHECK14-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
29260 // CHECK14-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
29261 // CHECK14:       omp.inner.for.body202:
29262 // CHECK14-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29263 // CHECK14-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
29264 // CHECK14-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
29265 // CHECK14-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !42
29266 // CHECK14-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !42
29267 // CHECK14-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
29268 // CHECK14-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
29269 // CHECK14-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i64 [[IDXPROM205]]
29270 // CHECK14-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX206]], align 4, !llvm.access.group !42
29271 // CHECK14-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !42
29272 // CHECK14-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
29273 // CHECK14-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
29274 // CHECK14-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i64 [[IDXPROM207]]
29275 // CHECK14-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX208]], align 4, !llvm.access.group !42
29276 // CHECK14-NEXT:    [[ADD209:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
29277 // CHECK14-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !42
29278 // CHECK14-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
29279 // CHECK14-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
29280 // CHECK14-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i64 [[IDXPROM210]]
29281 // CHECK14-NEXT:    store i32 [[ADD209]], i32* [[ARRAYIDX211]], align 4, !llvm.access.group !42
29282 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
29283 // CHECK14:       omp.body.continue212:
29284 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
29285 // CHECK14:       omp.inner.for.inc213:
29286 // CHECK14-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29287 // CHECK14-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
29288 // CHECK14-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29289 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP43:![0-9]+]]
29290 // CHECK14:       omp.inner.for.end215:
29291 // CHECK14-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
29292 // CHECK14-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
29293 // CHECK14-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
29294 // CHECK14-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
29295 // CHECK14-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
29296 // CHECK14-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
29297 // CHECK14-NEXT:    br label [[SIMD_IF_END220]]
29298 // CHECK14:       simd.if.end220:
29299 // CHECK14-NEXT:    ret i32 0
29300 //
29301 //
29302 // CHECK15-LABEL: define {{[^@]+}}@main
29303 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
29304 // CHECK15-NEXT:  entry:
29305 // CHECK15-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
29306 // CHECK15-NEXT:    [[A:%.*]] = alloca double*, align 4
29307 // CHECK15-NEXT:    [[B:%.*]] = alloca double*, align 4
29308 // CHECK15-NEXT:    [[C:%.*]] = alloca double*, align 4
29309 // CHECK15-NEXT:    [[N:%.*]] = alloca i32, align 4
29310 // CHECK15-NEXT:    [[CH:%.*]] = alloca i32, align 4
29311 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29312 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29313 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
29314 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29315 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29316 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
29317 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29318 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
29319 // CHECK15-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
29320 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
29321 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
29322 // CHECK15-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
29323 // CHECK15-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
29324 // CHECK15-NEXT:    [[I21:%.*]] = alloca i32, align 4
29325 // CHECK15-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
29326 // CHECK15-NEXT:    [[I25:%.*]] = alloca i32, align 4
29327 // CHECK15-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
29328 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
29329 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
29330 // CHECK15-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
29331 // CHECK15-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
29332 // CHECK15-NEXT:    [[I52:%.*]] = alloca i32, align 4
29333 // CHECK15-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
29334 // CHECK15-NEXT:    [[I56:%.*]] = alloca i32, align 4
29335 // CHECK15-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
29336 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
29337 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
29338 // CHECK15-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
29339 // CHECK15-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
29340 // CHECK15-NEXT:    [[I83:%.*]] = alloca i32, align 4
29341 // CHECK15-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
29342 // CHECK15-NEXT:    [[I87:%.*]] = alloca i32, align 4
29343 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
29344 // CHECK15-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
29345 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
29346 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
29347 // CHECK15-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
29348 // CHECK15-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
29349 // CHECK15-NEXT:    [[I115:%.*]] = alloca i32, align 4
29350 // CHECK15-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
29351 // CHECK15-NEXT:    [[I119:%.*]] = alloca i32, align 4
29352 // CHECK15-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
29353 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
29354 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
29355 // CHECK15-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
29356 // CHECK15-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
29357 // CHECK15-NEXT:    [[I146:%.*]] = alloca i32, align 4
29358 // CHECK15-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
29359 // CHECK15-NEXT:    [[I150:%.*]] = alloca i32, align 4
29360 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
29361 // CHECK15-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
29362 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
29363 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
29364 // CHECK15-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
29365 // CHECK15-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
29366 // CHECK15-NEXT:    [[I178:%.*]] = alloca i32, align 4
29367 // CHECK15-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
29368 // CHECK15-NEXT:    [[I182:%.*]] = alloca i32, align 4
29369 // CHECK15-NEXT:    store i32 0, i32* [[RETVAL]], align 4
29370 // CHECK15-NEXT:    store i32 10000, i32* [[N]], align 4
29371 // CHECK15-NEXT:    store i32 100, i32* [[CH]], align 4
29372 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
29373 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
29374 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29375 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
29376 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
29377 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
29378 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
29379 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29380 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29381 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29382 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
29383 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29384 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
29385 // CHECK15-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
29386 // CHECK15:       simd.if.then:
29387 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29388 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
29389 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29390 // CHECK15:       omp.inner.for.cond:
29391 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29392 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
29393 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
29394 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29395 // CHECK15:       omp.inner.for.body:
29396 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29397 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
29398 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29399 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
29400 // CHECK15-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !3
29401 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
29402 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i32 [[TMP9]]
29403 // CHECK15-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !3
29404 // CHECK15-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !3
29405 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
29406 // CHECK15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP11]], i32 [[TMP12]]
29407 // CHECK15-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !3
29408 // CHECK15-NEXT:    [[ADD6:%.*]] = fadd double [[TMP10]], [[TMP13]]
29409 // CHECK15-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !3
29410 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
29411 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP14]], i32 [[TMP15]]
29412 // CHECK15-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !3
29413 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29414 // CHECK15:       omp.body.continue:
29415 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29416 // CHECK15:       omp.inner.for.inc:
29417 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29418 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
29419 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29420 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
29421 // CHECK15:       omp.inner.for.end:
29422 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29423 // CHECK15-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
29424 // CHECK15-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
29425 // CHECK15-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
29426 // CHECK15-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
29427 // CHECK15-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
29428 // CHECK15-NEXT:    br label [[SIMD_IF_END]]
29429 // CHECK15:       simd.if.end:
29430 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
29431 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
29432 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29433 // CHECK15-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
29434 // CHECK15-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
29435 // CHECK15-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
29436 // CHECK15-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
29437 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
29438 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
29439 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
29440 // CHECK15-NEXT:    store i32 0, i32* [[I21]], align 4
29441 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29442 // CHECK15-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
29443 // CHECK15-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
29444 // CHECK15:       simd.if.then23:
29445 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
29446 // CHECK15-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
29447 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
29448 // CHECK15:       omp.inner.for.cond26:
29449 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29450 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !7
29451 // CHECK15-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
29452 // CHECK15-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
29453 // CHECK15:       omp.inner.for.body28:
29454 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29455 // CHECK15-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
29456 // CHECK15-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
29457 // CHECK15-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !7
29458 // CHECK15-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !7
29459 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
29460 // CHECK15-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
29461 // CHECK15-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX31]], align 4, !llvm.access.group !7
29462 // CHECK15-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !7
29463 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
29464 // CHECK15-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
29465 // CHECK15-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX32]], align 4, !llvm.access.group !7
29466 // CHECK15-NEXT:    [[ADD33:%.*]] = fadd double [[TMP28]], [[TMP31]]
29467 // CHECK15-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !7
29468 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
29469 // CHECK15-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP32]], i32 [[TMP33]]
29470 // CHECK15-NEXT:    store double [[ADD33]], double* [[ARRAYIDX34]], align 4, !llvm.access.group !7
29471 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
29472 // CHECK15:       omp.body.continue35:
29473 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
29474 // CHECK15:       omp.inner.for.inc36:
29475 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29476 // CHECK15-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
29477 // CHECK15-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29478 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]]
29479 // CHECK15:       omp.inner.for.end38:
29480 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29481 // CHECK15-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
29482 // CHECK15-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
29483 // CHECK15-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
29484 // CHECK15-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
29485 // CHECK15-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
29486 // CHECK15-NEXT:    br label [[SIMD_IF_END43]]
29487 // CHECK15:       simd.if.end43:
29488 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
29489 // CHECK15-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
29490 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29491 // CHECK15-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
29492 // CHECK15-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
29493 // CHECK15-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
29494 // CHECK15-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
29495 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
29496 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
29497 // CHECK15-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
29498 // CHECK15-NEXT:    store i32 0, i32* [[I52]], align 4
29499 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29500 // CHECK15-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
29501 // CHECK15-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
29502 // CHECK15:       simd.if.then54:
29503 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
29504 // CHECK15-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
29505 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
29506 // CHECK15:       omp.inner.for.cond57:
29507 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29508 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !10
29509 // CHECK15-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
29510 // CHECK15-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
29511 // CHECK15:       omp.inner.for.body59:
29512 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29513 // CHECK15-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
29514 // CHECK15-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
29515 // CHECK15-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !10
29516 // CHECK15-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !10
29517 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
29518 // CHECK15-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds double, double* [[TMP44]], i32 [[TMP45]]
29519 // CHECK15-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX62]], align 4, !llvm.access.group !10
29520 // CHECK15-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !10
29521 // CHECK15-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
29522 // CHECK15-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds double, double* [[TMP47]], i32 [[TMP48]]
29523 // CHECK15-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX63]], align 4, !llvm.access.group !10
29524 // CHECK15-NEXT:    [[ADD64:%.*]] = fadd double [[TMP46]], [[TMP49]]
29525 // CHECK15-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !10
29526 // CHECK15-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
29527 // CHECK15-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds double, double* [[TMP50]], i32 [[TMP51]]
29528 // CHECK15-NEXT:    store double [[ADD64]], double* [[ARRAYIDX65]], align 4, !llvm.access.group !10
29529 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
29530 // CHECK15:       omp.body.continue66:
29531 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
29532 // CHECK15:       omp.inner.for.inc67:
29533 // CHECK15-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29534 // CHECK15-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
29535 // CHECK15-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29536 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP11:![0-9]+]]
29537 // CHECK15:       omp.inner.for.end69:
29538 // CHECK15-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29539 // CHECK15-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
29540 // CHECK15-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
29541 // CHECK15-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
29542 // CHECK15-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
29543 // CHECK15-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
29544 // CHECK15-NEXT:    br label [[SIMD_IF_END74]]
29545 // CHECK15:       simd.if.end74:
29546 // CHECK15-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
29547 // CHECK15-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
29548 // CHECK15-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
29549 // CHECK15-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
29550 // CHECK15-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
29551 // CHECK15-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
29552 // CHECK15-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
29553 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
29554 // CHECK15-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
29555 // CHECK15-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
29556 // CHECK15-NEXT:    store i32 0, i32* [[I83]], align 4
29557 // CHECK15-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
29558 // CHECK15-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
29559 // CHECK15-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
29560 // CHECK15:       simd.if.then85:
29561 // CHECK15-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
29562 // CHECK15-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
29563 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
29564 // CHECK15:       omp.inner.for.cond88:
29565 // CHECK15-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29566 // CHECK15-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !13
29567 // CHECK15-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
29568 // CHECK15-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
29569 // CHECK15:       omp.inner.for.body90:
29570 // CHECK15-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29571 // CHECK15-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
29572 // CHECK15-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
29573 // CHECK15-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !13
29574 // CHECK15-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !13
29575 // CHECK15-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
29576 // CHECK15-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds double, double* [[TMP62]], i32 [[TMP63]]
29577 // CHECK15-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX93]], align 4, !llvm.access.group !13
29578 // CHECK15-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !13
29579 // CHECK15-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
29580 // CHECK15-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds double, double* [[TMP65]], i32 [[TMP66]]
29581 // CHECK15-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX94]], align 4, !llvm.access.group !13
29582 // CHECK15-NEXT:    [[ADD95:%.*]] = fadd double [[TMP64]], [[TMP67]]
29583 // CHECK15-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !13
29584 // CHECK15-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
29585 // CHECK15-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds double, double* [[TMP68]], i32 [[TMP69]]
29586 // CHECK15-NEXT:    store double [[ADD95]], double* [[ARRAYIDX96]], align 4, !llvm.access.group !13
29587 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
29588 // CHECK15:       omp.body.continue97:
29589 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
29590 // CHECK15:       omp.inner.for.inc98:
29591 // CHECK15-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29592 // CHECK15-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
29593 // CHECK15-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29594 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP14:![0-9]+]]
29595 // CHECK15:       omp.inner.for.end100:
29596 // CHECK15-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
29597 // CHECK15-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
29598 // CHECK15-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
29599 // CHECK15-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
29600 // CHECK15-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
29601 // CHECK15-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
29602 // CHECK15-NEXT:    br label [[SIMD_IF_END105]]
29603 // CHECK15:       simd.if.end105:
29604 // CHECK15-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
29605 // CHECK15-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
29606 // CHECK15-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
29607 // CHECK15-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
29608 // CHECK15-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
29609 // CHECK15-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
29610 // CHECK15-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
29611 // CHECK15-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
29612 // CHECK15-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
29613 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
29614 // CHECK15-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
29615 // CHECK15-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
29616 // CHECK15-NEXT:    store i32 0, i32* [[I115]], align 4
29617 // CHECK15-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
29618 // CHECK15-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
29619 // CHECK15-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
29620 // CHECK15:       simd.if.then117:
29621 // CHECK15-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
29622 // CHECK15-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
29623 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
29624 // CHECK15:       omp.inner.for.cond120:
29625 // CHECK15-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29626 // CHECK15-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !16
29627 // CHECK15-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
29628 // CHECK15-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
29629 // CHECK15:       omp.inner.for.body122:
29630 // CHECK15-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29631 // CHECK15-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
29632 // CHECK15-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
29633 // CHECK15-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !16
29634 // CHECK15-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !16
29635 // CHECK15-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
29636 // CHECK15-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds double, double* [[TMP81]], i32 [[TMP82]]
29637 // CHECK15-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX125]], align 4, !llvm.access.group !16
29638 // CHECK15-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !16
29639 // CHECK15-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
29640 // CHECK15-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds double, double* [[TMP84]], i32 [[TMP85]]
29641 // CHECK15-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX126]], align 4, !llvm.access.group !16
29642 // CHECK15-NEXT:    [[ADD127:%.*]] = fadd double [[TMP83]], [[TMP86]]
29643 // CHECK15-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !16
29644 // CHECK15-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
29645 // CHECK15-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds double, double* [[TMP87]], i32 [[TMP88]]
29646 // CHECK15-NEXT:    store double [[ADD127]], double* [[ARRAYIDX128]], align 4, !llvm.access.group !16
29647 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
29648 // CHECK15:       omp.body.continue129:
29649 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
29650 // CHECK15:       omp.inner.for.inc130:
29651 // CHECK15-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29652 // CHECK15-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
29653 // CHECK15-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29654 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP17:![0-9]+]]
29655 // CHECK15:       omp.inner.for.end132:
29656 // CHECK15-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
29657 // CHECK15-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
29658 // CHECK15-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
29659 // CHECK15-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
29660 // CHECK15-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
29661 // CHECK15-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
29662 // CHECK15-NEXT:    br label [[SIMD_IF_END137]]
29663 // CHECK15:       simd.if.end137:
29664 // CHECK15-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
29665 // CHECK15-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
29666 // CHECK15-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
29667 // CHECK15-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
29668 // CHECK15-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
29669 // CHECK15-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
29670 // CHECK15-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
29671 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
29672 // CHECK15-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
29673 // CHECK15-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
29674 // CHECK15-NEXT:    store i32 0, i32* [[I146]], align 4
29675 // CHECK15-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
29676 // CHECK15-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
29677 // CHECK15-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
29678 // CHECK15:       simd.if.then148:
29679 // CHECK15-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
29680 // CHECK15-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
29681 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
29682 // CHECK15:       omp.inner.for.cond151:
29683 // CHECK15-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29684 // CHECK15-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !19
29685 // CHECK15-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
29686 // CHECK15-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
29687 // CHECK15:       omp.inner.for.body153:
29688 // CHECK15-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29689 // CHECK15-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
29690 // CHECK15-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
29691 // CHECK15-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !19
29692 // CHECK15-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !19
29693 // CHECK15-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
29694 // CHECK15-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds double, double* [[TMP99]], i32 [[TMP100]]
29695 // CHECK15-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX156]], align 4, !llvm.access.group !19
29696 // CHECK15-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !19
29697 // CHECK15-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
29698 // CHECK15-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds double, double* [[TMP102]], i32 [[TMP103]]
29699 // CHECK15-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX157]], align 4, !llvm.access.group !19
29700 // CHECK15-NEXT:    [[ADD158:%.*]] = fadd double [[TMP101]], [[TMP104]]
29701 // CHECK15-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !19
29702 // CHECK15-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
29703 // CHECK15-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds double, double* [[TMP105]], i32 [[TMP106]]
29704 // CHECK15-NEXT:    store double [[ADD158]], double* [[ARRAYIDX159]], align 4, !llvm.access.group !19
29705 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
29706 // CHECK15:       omp.body.continue160:
29707 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
29708 // CHECK15:       omp.inner.for.inc161:
29709 // CHECK15-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29710 // CHECK15-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
29711 // CHECK15-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29712 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP20:![0-9]+]]
29713 // CHECK15:       omp.inner.for.end163:
29714 // CHECK15-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
29715 // CHECK15-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
29716 // CHECK15-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
29717 // CHECK15-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
29718 // CHECK15-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
29719 // CHECK15-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
29720 // CHECK15-NEXT:    br label [[SIMD_IF_END168]]
29721 // CHECK15:       simd.if.end168:
29722 // CHECK15-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
29723 // CHECK15-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
29724 // CHECK15-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
29725 // CHECK15-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
29726 // CHECK15-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
29727 // CHECK15-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
29728 // CHECK15-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
29729 // CHECK15-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
29730 // CHECK15-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
29731 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
29732 // CHECK15-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
29733 // CHECK15-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
29734 // CHECK15-NEXT:    store i32 0, i32* [[I178]], align 4
29735 // CHECK15-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
29736 // CHECK15-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
29737 // CHECK15-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
29738 // CHECK15:       simd.if.then180:
29739 // CHECK15-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
29740 // CHECK15-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
29741 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
29742 // CHECK15:       omp.inner.for.cond183:
29743 // CHECK15-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29744 // CHECK15-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !22
29745 // CHECK15-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
29746 // CHECK15-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
29747 // CHECK15:       omp.inner.for.body185:
29748 // CHECK15-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29749 // CHECK15-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
29750 // CHECK15-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
29751 // CHECK15-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !22
29752 // CHECK15-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !22
29753 // CHECK15-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
29754 // CHECK15-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds double, double* [[TMP118]], i32 [[TMP119]]
29755 // CHECK15-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX188]], align 4, !llvm.access.group !22
29756 // CHECK15-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !22
29757 // CHECK15-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
29758 // CHECK15-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds double, double* [[TMP121]], i32 [[TMP122]]
29759 // CHECK15-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX189]], align 4, !llvm.access.group !22
29760 // CHECK15-NEXT:    [[ADD190:%.*]] = fadd double [[TMP120]], [[TMP123]]
29761 // CHECK15-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !22
29762 // CHECK15-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
29763 // CHECK15-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds double, double* [[TMP124]], i32 [[TMP125]]
29764 // CHECK15-NEXT:    store double [[ADD190]], double* [[ARRAYIDX191]], align 4, !llvm.access.group !22
29765 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
29766 // CHECK15:       omp.body.continue192:
29767 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
29768 // CHECK15:       omp.inner.for.inc193:
29769 // CHECK15-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29770 // CHECK15-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
29771 // CHECK15-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29772 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP23:![0-9]+]]
29773 // CHECK15:       omp.inner.for.end195:
29774 // CHECK15-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
29775 // CHECK15-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
29776 // CHECK15-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
29777 // CHECK15-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
29778 // CHECK15-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
29779 // CHECK15-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
29780 // CHECK15-NEXT:    br label [[SIMD_IF_END200]]
29781 // CHECK15:       simd.if.end200:
29782 // CHECK15-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
29783 // CHECK15-NEXT:    ret i32 [[CALL]]
29784 //
29785 //
29786 // CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
29787 // CHECK15-SAME: () #[[ATTR1:[0-9]+]] comdat {
29788 // CHECK15-NEXT:  entry:
29789 // CHECK15-NEXT:    [[A:%.*]] = alloca i32*, align 4
29790 // CHECK15-NEXT:    [[B:%.*]] = alloca i32*, align 4
29791 // CHECK15-NEXT:    [[C:%.*]] = alloca i32*, align 4
29792 // CHECK15-NEXT:    [[N:%.*]] = alloca i32, align 4
29793 // CHECK15-NEXT:    [[CH:%.*]] = alloca i32, align 4
29794 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29795 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29796 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
29797 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29798 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29799 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
29800 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29801 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
29802 // CHECK15-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
29803 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
29804 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
29805 // CHECK15-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
29806 // CHECK15-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
29807 // CHECK15-NEXT:    [[I21:%.*]] = alloca i32, align 4
29808 // CHECK15-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
29809 // CHECK15-NEXT:    [[I25:%.*]] = alloca i32, align 4
29810 // CHECK15-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
29811 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
29812 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
29813 // CHECK15-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
29814 // CHECK15-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
29815 // CHECK15-NEXT:    [[I52:%.*]] = alloca i32, align 4
29816 // CHECK15-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
29817 // CHECK15-NEXT:    [[I56:%.*]] = alloca i32, align 4
29818 // CHECK15-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
29819 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
29820 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
29821 // CHECK15-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
29822 // CHECK15-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
29823 // CHECK15-NEXT:    [[I83:%.*]] = alloca i32, align 4
29824 // CHECK15-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
29825 // CHECK15-NEXT:    [[I87:%.*]] = alloca i32, align 4
29826 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
29827 // CHECK15-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
29828 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
29829 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
29830 // CHECK15-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
29831 // CHECK15-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
29832 // CHECK15-NEXT:    [[I115:%.*]] = alloca i32, align 4
29833 // CHECK15-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
29834 // CHECK15-NEXT:    [[I119:%.*]] = alloca i32, align 4
29835 // CHECK15-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
29836 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
29837 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
29838 // CHECK15-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
29839 // CHECK15-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
29840 // CHECK15-NEXT:    [[I146:%.*]] = alloca i32, align 4
29841 // CHECK15-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
29842 // CHECK15-NEXT:    [[I150:%.*]] = alloca i32, align 4
29843 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
29844 // CHECK15-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
29845 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
29846 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
29847 // CHECK15-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
29848 // CHECK15-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
29849 // CHECK15-NEXT:    [[I178:%.*]] = alloca i32, align 4
29850 // CHECK15-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
29851 // CHECK15-NEXT:    [[I182:%.*]] = alloca i32, align 4
29852 // CHECK15-NEXT:    store i32 10000, i32* [[N]], align 4
29853 // CHECK15-NEXT:    store i32 100, i32* [[CH]], align 4
29854 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
29855 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
29856 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29857 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
29858 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
29859 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
29860 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
29861 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29862 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29863 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29864 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
29865 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29866 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
29867 // CHECK15-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
29868 // CHECK15:       simd.if.then:
29869 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29870 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
29871 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29872 // CHECK15:       omp.inner.for.cond:
29873 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29874 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
29875 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
29876 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29877 // CHECK15:       omp.inner.for.body:
29878 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29879 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
29880 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29881 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !25
29882 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !25
29883 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
29884 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 [[TMP9]]
29885 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
29886 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !25
29887 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
29888 // CHECK15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 [[TMP12]]
29889 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !25
29890 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
29891 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !25
29892 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
29893 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i32 [[TMP15]]
29894 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !25
29895 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29896 // CHECK15:       omp.body.continue:
29897 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29898 // CHECK15:       omp.inner.for.inc:
29899 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29900 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
29901 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29902 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
29903 // CHECK15:       omp.inner.for.end:
29904 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29905 // CHECK15-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
29906 // CHECK15-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
29907 // CHECK15-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
29908 // CHECK15-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
29909 // CHECK15-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
29910 // CHECK15-NEXT:    br label [[SIMD_IF_END]]
29911 // CHECK15:       simd.if.end:
29912 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
29913 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
29914 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29915 // CHECK15-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
29916 // CHECK15-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
29917 // CHECK15-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
29918 // CHECK15-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
29919 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
29920 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
29921 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
29922 // CHECK15-NEXT:    store i32 0, i32* [[I21]], align 4
29923 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29924 // CHECK15-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
29925 // CHECK15-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
29926 // CHECK15:       simd.if.then23:
29927 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
29928 // CHECK15-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
29929 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
29930 // CHECK15:       omp.inner.for.cond26:
29931 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29932 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !28
29933 // CHECK15-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
29934 // CHECK15-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
29935 // CHECK15:       omp.inner.for.body28:
29936 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29937 // CHECK15-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
29938 // CHECK15-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
29939 // CHECK15-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !28
29940 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !28
29941 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
29942 // CHECK15-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
29943 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX31]], align 4, !llvm.access.group !28
29944 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !28
29945 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
29946 // CHECK15-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
29947 // CHECK15-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !28
29948 // CHECK15-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
29949 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !28
29950 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
29951 // CHECK15-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 [[TMP33]]
29952 // CHECK15-NEXT:    store i32 [[ADD33]], i32* [[ARRAYIDX34]], align 4, !llvm.access.group !28
29953 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
29954 // CHECK15:       omp.body.continue35:
29955 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
29956 // CHECK15:       omp.inner.for.inc36:
29957 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29958 // CHECK15-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
29959 // CHECK15-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29960 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP29:![0-9]+]]
29961 // CHECK15:       omp.inner.for.end38:
29962 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29963 // CHECK15-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
29964 // CHECK15-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
29965 // CHECK15-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
29966 // CHECK15-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
29967 // CHECK15-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
29968 // CHECK15-NEXT:    br label [[SIMD_IF_END43]]
29969 // CHECK15:       simd.if.end43:
29970 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
29971 // CHECK15-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
29972 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29973 // CHECK15-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
29974 // CHECK15-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
29975 // CHECK15-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
29976 // CHECK15-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
29977 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
29978 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
29979 // CHECK15-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
29980 // CHECK15-NEXT:    store i32 0, i32* [[I52]], align 4
29981 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29982 // CHECK15-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
29983 // CHECK15-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
29984 // CHECK15:       simd.if.then54:
29985 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
29986 // CHECK15-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
29987 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
29988 // CHECK15:       omp.inner.for.cond57:
29989 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
29990 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !31
29991 // CHECK15-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
29992 // CHECK15-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
29993 // CHECK15:       omp.inner.for.body59:
29994 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
29995 // CHECK15-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
29996 // CHECK15-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
29997 // CHECK15-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !31
29998 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !31
29999 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30000 // CHECK15-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 [[TMP45]]
30001 // CHECK15-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !31
30002 // CHECK15-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !31
30003 // CHECK15-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30004 // CHECK15-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i32 [[TMP48]]
30005 // CHECK15-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX63]], align 4, !llvm.access.group !31
30006 // CHECK15-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
30007 // CHECK15-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !31
30008 // CHECK15-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30009 // CHECK15-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i32 [[TMP51]]
30010 // CHECK15-NEXT:    store i32 [[ADD64]], i32* [[ARRAYIDX65]], align 4, !llvm.access.group !31
30011 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
30012 // CHECK15:       omp.body.continue66:
30013 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
30014 // CHECK15:       omp.inner.for.inc67:
30015 // CHECK15-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30016 // CHECK15-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
30017 // CHECK15-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30018 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP32:![0-9]+]]
30019 // CHECK15:       omp.inner.for.end69:
30020 // CHECK15-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30021 // CHECK15-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
30022 // CHECK15-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
30023 // CHECK15-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
30024 // CHECK15-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
30025 // CHECK15-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
30026 // CHECK15-NEXT:    br label [[SIMD_IF_END74]]
30027 // CHECK15:       simd.if.end74:
30028 // CHECK15-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
30029 // CHECK15-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
30030 // CHECK15-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30031 // CHECK15-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
30032 // CHECK15-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
30033 // CHECK15-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
30034 // CHECK15-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
30035 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
30036 // CHECK15-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
30037 // CHECK15-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
30038 // CHECK15-NEXT:    store i32 0, i32* [[I83]], align 4
30039 // CHECK15-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30040 // CHECK15-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
30041 // CHECK15-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
30042 // CHECK15:       simd.if.then85:
30043 // CHECK15-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
30044 // CHECK15-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
30045 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
30046 // CHECK15:       omp.inner.for.cond88:
30047 // CHECK15-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30048 // CHECK15-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !34
30049 // CHECK15-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
30050 // CHECK15-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
30051 // CHECK15:       omp.inner.for.body90:
30052 // CHECK15-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30053 // CHECK15-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
30054 // CHECK15-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
30055 // CHECK15-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !34
30056 // CHECK15-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !34
30057 // CHECK15-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
30058 // CHECK15-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i32 [[TMP63]]
30059 // CHECK15-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX93]], align 4, !llvm.access.group !34
30060 // CHECK15-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !34
30061 // CHECK15-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
30062 // CHECK15-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i32 [[TMP66]]
30063 // CHECK15-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX94]], align 4, !llvm.access.group !34
30064 // CHECK15-NEXT:    [[ADD95:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
30065 // CHECK15-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !34
30066 // CHECK15-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
30067 // CHECK15-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i32 [[TMP69]]
30068 // CHECK15-NEXT:    store i32 [[ADD95]], i32* [[ARRAYIDX96]], align 4, !llvm.access.group !34
30069 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
30070 // CHECK15:       omp.body.continue97:
30071 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
30072 // CHECK15:       omp.inner.for.inc98:
30073 // CHECK15-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30074 // CHECK15-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
30075 // CHECK15-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30076 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP35:![0-9]+]]
30077 // CHECK15:       omp.inner.for.end100:
30078 // CHECK15-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30079 // CHECK15-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
30080 // CHECK15-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
30081 // CHECK15-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
30082 // CHECK15-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
30083 // CHECK15-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
30084 // CHECK15-NEXT:    br label [[SIMD_IF_END105]]
30085 // CHECK15:       simd.if.end105:
30086 // CHECK15-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
30087 // CHECK15-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
30088 // CHECK15-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
30089 // CHECK15-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
30090 // CHECK15-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30091 // CHECK15-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
30092 // CHECK15-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
30093 // CHECK15-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
30094 // CHECK15-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
30095 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
30096 // CHECK15-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
30097 // CHECK15-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
30098 // CHECK15-NEXT:    store i32 0, i32* [[I115]], align 4
30099 // CHECK15-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30100 // CHECK15-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
30101 // CHECK15-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
30102 // CHECK15:       simd.if.then117:
30103 // CHECK15-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
30104 // CHECK15-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
30105 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
30106 // CHECK15:       omp.inner.for.cond120:
30107 // CHECK15-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30108 // CHECK15-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !37
30109 // CHECK15-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
30110 // CHECK15-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
30111 // CHECK15:       omp.inner.for.body122:
30112 // CHECK15-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30113 // CHECK15-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
30114 // CHECK15-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
30115 // CHECK15-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !37
30116 // CHECK15-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !37
30117 // CHECK15-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
30118 // CHECK15-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i32 [[TMP82]]
30119 // CHECK15-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX125]], align 4, !llvm.access.group !37
30120 // CHECK15-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !37
30121 // CHECK15-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
30122 // CHECK15-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i32 [[TMP85]]
30123 // CHECK15-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX126]], align 4, !llvm.access.group !37
30124 // CHECK15-NEXT:    [[ADD127:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
30125 // CHECK15-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !37
30126 // CHECK15-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
30127 // CHECK15-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i32 [[TMP88]]
30128 // CHECK15-NEXT:    store i32 [[ADD127]], i32* [[ARRAYIDX128]], align 4, !llvm.access.group !37
30129 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
30130 // CHECK15:       omp.body.continue129:
30131 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
30132 // CHECK15:       omp.inner.for.inc130:
30133 // CHECK15-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30134 // CHECK15-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
30135 // CHECK15-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30136 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP38:![0-9]+]]
30137 // CHECK15:       omp.inner.for.end132:
30138 // CHECK15-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30139 // CHECK15-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
30140 // CHECK15-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
30141 // CHECK15-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
30142 // CHECK15-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
30143 // CHECK15-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
30144 // CHECK15-NEXT:    br label [[SIMD_IF_END137]]
30145 // CHECK15:       simd.if.end137:
30146 // CHECK15-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
30147 // CHECK15-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
30148 // CHECK15-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30149 // CHECK15-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
30150 // CHECK15-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
30151 // CHECK15-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
30152 // CHECK15-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
30153 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
30154 // CHECK15-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
30155 // CHECK15-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
30156 // CHECK15-NEXT:    store i32 0, i32* [[I146]], align 4
30157 // CHECK15-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30158 // CHECK15-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
30159 // CHECK15-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
30160 // CHECK15:       simd.if.then148:
30161 // CHECK15-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
30162 // CHECK15-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
30163 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
30164 // CHECK15:       omp.inner.for.cond151:
30165 // CHECK15-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30166 // CHECK15-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !40
30167 // CHECK15-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
30168 // CHECK15-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
30169 // CHECK15:       omp.inner.for.body153:
30170 // CHECK15-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30171 // CHECK15-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
30172 // CHECK15-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
30173 // CHECK15-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !40
30174 // CHECK15-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !40
30175 // CHECK15-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
30176 // CHECK15-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i32 [[TMP100]]
30177 // CHECK15-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX156]], align 4, !llvm.access.group !40
30178 // CHECK15-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !40
30179 // CHECK15-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
30180 // CHECK15-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i32 [[TMP103]]
30181 // CHECK15-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX157]], align 4, !llvm.access.group !40
30182 // CHECK15-NEXT:    [[ADD158:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
30183 // CHECK15-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !40
30184 // CHECK15-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
30185 // CHECK15-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i32 [[TMP106]]
30186 // CHECK15-NEXT:    store i32 [[ADD158]], i32* [[ARRAYIDX159]], align 4, !llvm.access.group !40
30187 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
30188 // CHECK15:       omp.body.continue160:
30189 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
30190 // CHECK15:       omp.inner.for.inc161:
30191 // CHECK15-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30192 // CHECK15-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
30193 // CHECK15-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30194 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP41:![0-9]+]]
30195 // CHECK15:       omp.inner.for.end163:
30196 // CHECK15-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30197 // CHECK15-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
30198 // CHECK15-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
30199 // CHECK15-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
30200 // CHECK15-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
30201 // CHECK15-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
30202 // CHECK15-NEXT:    br label [[SIMD_IF_END168]]
30203 // CHECK15:       simd.if.end168:
30204 // CHECK15-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
30205 // CHECK15-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
30206 // CHECK15-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
30207 // CHECK15-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
30208 // CHECK15-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30209 // CHECK15-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
30210 // CHECK15-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
30211 // CHECK15-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
30212 // CHECK15-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
30213 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
30214 // CHECK15-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
30215 // CHECK15-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
30216 // CHECK15-NEXT:    store i32 0, i32* [[I178]], align 4
30217 // CHECK15-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30218 // CHECK15-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
30219 // CHECK15-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
30220 // CHECK15:       simd.if.then180:
30221 // CHECK15-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
30222 // CHECK15-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
30223 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
30224 // CHECK15:       omp.inner.for.cond183:
30225 // CHECK15-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30226 // CHECK15-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !43
30227 // CHECK15-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
30228 // CHECK15-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
30229 // CHECK15:       omp.inner.for.body185:
30230 // CHECK15-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30231 // CHECK15-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
30232 // CHECK15-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
30233 // CHECK15-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !43
30234 // CHECK15-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !43
30235 // CHECK15-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
30236 // CHECK15-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i32 [[TMP119]]
30237 // CHECK15-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX188]], align 4, !llvm.access.group !43
30238 // CHECK15-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !43
30239 // CHECK15-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
30240 // CHECK15-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i32 [[TMP122]]
30241 // CHECK15-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX189]], align 4, !llvm.access.group !43
30242 // CHECK15-NEXT:    [[ADD190:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
30243 // CHECK15-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !43
30244 // CHECK15-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
30245 // CHECK15-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i32 [[TMP125]]
30246 // CHECK15-NEXT:    store i32 [[ADD190]], i32* [[ARRAYIDX191]], align 4, !llvm.access.group !43
30247 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
30248 // CHECK15:       omp.body.continue192:
30249 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
30250 // CHECK15:       omp.inner.for.inc193:
30251 // CHECK15-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30252 // CHECK15-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
30253 // CHECK15-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30254 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP44:![0-9]+]]
30255 // CHECK15:       omp.inner.for.end195:
30256 // CHECK15-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30257 // CHECK15-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
30258 // CHECK15-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
30259 // CHECK15-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
30260 // CHECK15-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
30261 // CHECK15-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
30262 // CHECK15-NEXT:    br label [[SIMD_IF_END200]]
30263 // CHECK15:       simd.if.end200:
30264 // CHECK15-NEXT:    ret i32 0
30265 //
30266 //
30267 // CHECK16-LABEL: define {{[^@]+}}@main
30268 // CHECK16-SAME: () #[[ATTR0:[0-9]+]] {
30269 // CHECK16-NEXT:  entry:
30270 // CHECK16-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
30271 // CHECK16-NEXT:    [[A:%.*]] = alloca double*, align 4
30272 // CHECK16-NEXT:    [[B:%.*]] = alloca double*, align 4
30273 // CHECK16-NEXT:    [[C:%.*]] = alloca double*, align 4
30274 // CHECK16-NEXT:    [[N:%.*]] = alloca i32, align 4
30275 // CHECK16-NEXT:    [[CH:%.*]] = alloca i32, align 4
30276 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30277 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
30278 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
30279 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30280 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30281 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
30282 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30283 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
30284 // CHECK16-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
30285 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
30286 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
30287 // CHECK16-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
30288 // CHECK16-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
30289 // CHECK16-NEXT:    [[I21:%.*]] = alloca i32, align 4
30290 // CHECK16-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
30291 // CHECK16-NEXT:    [[I25:%.*]] = alloca i32, align 4
30292 // CHECK16-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
30293 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
30294 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
30295 // CHECK16-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
30296 // CHECK16-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
30297 // CHECK16-NEXT:    [[I52:%.*]] = alloca i32, align 4
30298 // CHECK16-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
30299 // CHECK16-NEXT:    [[I56:%.*]] = alloca i32, align 4
30300 // CHECK16-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
30301 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
30302 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
30303 // CHECK16-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
30304 // CHECK16-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
30305 // CHECK16-NEXT:    [[I83:%.*]] = alloca i32, align 4
30306 // CHECK16-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
30307 // CHECK16-NEXT:    [[I87:%.*]] = alloca i32, align 4
30308 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
30309 // CHECK16-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
30310 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
30311 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
30312 // CHECK16-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
30313 // CHECK16-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
30314 // CHECK16-NEXT:    [[I115:%.*]] = alloca i32, align 4
30315 // CHECK16-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
30316 // CHECK16-NEXT:    [[I119:%.*]] = alloca i32, align 4
30317 // CHECK16-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
30318 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
30319 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
30320 // CHECK16-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
30321 // CHECK16-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
30322 // CHECK16-NEXT:    [[I146:%.*]] = alloca i32, align 4
30323 // CHECK16-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
30324 // CHECK16-NEXT:    [[I150:%.*]] = alloca i32, align 4
30325 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
30326 // CHECK16-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
30327 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
30328 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
30329 // CHECK16-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
30330 // CHECK16-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
30331 // CHECK16-NEXT:    [[I178:%.*]] = alloca i32, align 4
30332 // CHECK16-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
30333 // CHECK16-NEXT:    [[I182:%.*]] = alloca i32, align 4
30334 // CHECK16-NEXT:    store i32 0, i32* [[RETVAL]], align 4
30335 // CHECK16-NEXT:    store i32 10000, i32* [[N]], align 4
30336 // CHECK16-NEXT:    store i32 100, i32* [[CH]], align 4
30337 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
30338 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
30339 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30340 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
30341 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
30342 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
30343 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
30344 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30345 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
30346 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
30347 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
30348 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30349 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
30350 // CHECK16-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
30351 // CHECK16:       simd.if.then:
30352 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30353 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
30354 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30355 // CHECK16:       omp.inner.for.cond:
30356 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30357 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
30358 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
30359 // CHECK16-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30360 // CHECK16:       omp.inner.for.body:
30361 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30362 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
30363 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30364 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
30365 // CHECK16-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !3
30366 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
30367 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i32 [[TMP9]]
30368 // CHECK16-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !3
30369 // CHECK16-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !3
30370 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
30371 // CHECK16-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP11]], i32 [[TMP12]]
30372 // CHECK16-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !3
30373 // CHECK16-NEXT:    [[ADD6:%.*]] = fadd double [[TMP10]], [[TMP13]]
30374 // CHECK16-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !3
30375 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
30376 // CHECK16-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP14]], i32 [[TMP15]]
30377 // CHECK16-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !3
30378 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30379 // CHECK16:       omp.body.continue:
30380 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30381 // CHECK16:       omp.inner.for.inc:
30382 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30383 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
30384 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30385 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
30386 // CHECK16:       omp.inner.for.end:
30387 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30388 // CHECK16-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
30389 // CHECK16-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
30390 // CHECK16-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
30391 // CHECK16-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
30392 // CHECK16-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
30393 // CHECK16-NEXT:    br label [[SIMD_IF_END]]
30394 // CHECK16:       simd.if.end:
30395 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
30396 // CHECK16-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
30397 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30398 // CHECK16-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
30399 // CHECK16-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
30400 // CHECK16-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
30401 // CHECK16-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
30402 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
30403 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
30404 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
30405 // CHECK16-NEXT:    store i32 0, i32* [[I21]], align 4
30406 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30407 // CHECK16-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
30408 // CHECK16-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
30409 // CHECK16:       simd.if.then23:
30410 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
30411 // CHECK16-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
30412 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
30413 // CHECK16:       omp.inner.for.cond26:
30414 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30415 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !7
30416 // CHECK16-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
30417 // CHECK16-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
30418 // CHECK16:       omp.inner.for.body28:
30419 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30420 // CHECK16-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
30421 // CHECK16-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
30422 // CHECK16-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !7
30423 // CHECK16-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !7
30424 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
30425 // CHECK16-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
30426 // CHECK16-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX31]], align 4, !llvm.access.group !7
30427 // CHECK16-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !7
30428 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
30429 // CHECK16-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
30430 // CHECK16-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX32]], align 4, !llvm.access.group !7
30431 // CHECK16-NEXT:    [[ADD33:%.*]] = fadd double [[TMP28]], [[TMP31]]
30432 // CHECK16-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !7
30433 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
30434 // CHECK16-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP32]], i32 [[TMP33]]
30435 // CHECK16-NEXT:    store double [[ADD33]], double* [[ARRAYIDX34]], align 4, !llvm.access.group !7
30436 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
30437 // CHECK16:       omp.body.continue35:
30438 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
30439 // CHECK16:       omp.inner.for.inc36:
30440 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30441 // CHECK16-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
30442 // CHECK16-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30443 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]]
30444 // CHECK16:       omp.inner.for.end38:
30445 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30446 // CHECK16-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
30447 // CHECK16-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
30448 // CHECK16-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
30449 // CHECK16-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
30450 // CHECK16-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
30451 // CHECK16-NEXT:    br label [[SIMD_IF_END43]]
30452 // CHECK16:       simd.if.end43:
30453 // CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
30454 // CHECK16-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
30455 // CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30456 // CHECK16-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
30457 // CHECK16-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
30458 // CHECK16-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
30459 // CHECK16-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
30460 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
30461 // CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
30462 // CHECK16-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
30463 // CHECK16-NEXT:    store i32 0, i32* [[I52]], align 4
30464 // CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30465 // CHECK16-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
30466 // CHECK16-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
30467 // CHECK16:       simd.if.then54:
30468 // CHECK16-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
30469 // CHECK16-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
30470 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
30471 // CHECK16:       omp.inner.for.cond57:
30472 // CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30473 // CHECK16-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !10
30474 // CHECK16-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
30475 // CHECK16-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
30476 // CHECK16:       omp.inner.for.body59:
30477 // CHECK16-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30478 // CHECK16-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
30479 // CHECK16-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
30480 // CHECK16-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !10
30481 // CHECK16-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !10
30482 // CHECK16-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
30483 // CHECK16-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds double, double* [[TMP44]], i32 [[TMP45]]
30484 // CHECK16-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX62]], align 4, !llvm.access.group !10
30485 // CHECK16-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !10
30486 // CHECK16-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
30487 // CHECK16-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds double, double* [[TMP47]], i32 [[TMP48]]
30488 // CHECK16-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX63]], align 4, !llvm.access.group !10
30489 // CHECK16-NEXT:    [[ADD64:%.*]] = fadd double [[TMP46]], [[TMP49]]
30490 // CHECK16-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !10
30491 // CHECK16-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
30492 // CHECK16-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds double, double* [[TMP50]], i32 [[TMP51]]
30493 // CHECK16-NEXT:    store double [[ADD64]], double* [[ARRAYIDX65]], align 4, !llvm.access.group !10
30494 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
30495 // CHECK16:       omp.body.continue66:
30496 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
30497 // CHECK16:       omp.inner.for.inc67:
30498 // CHECK16-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30499 // CHECK16-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
30500 // CHECK16-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30501 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP11:![0-9]+]]
30502 // CHECK16:       omp.inner.for.end69:
30503 // CHECK16-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30504 // CHECK16-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
30505 // CHECK16-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
30506 // CHECK16-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
30507 // CHECK16-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
30508 // CHECK16-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
30509 // CHECK16-NEXT:    br label [[SIMD_IF_END74]]
30510 // CHECK16:       simd.if.end74:
30511 // CHECK16-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
30512 // CHECK16-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
30513 // CHECK16-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30514 // CHECK16-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
30515 // CHECK16-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
30516 // CHECK16-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
30517 // CHECK16-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
30518 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
30519 // CHECK16-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
30520 // CHECK16-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
30521 // CHECK16-NEXT:    store i32 0, i32* [[I83]], align 4
30522 // CHECK16-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30523 // CHECK16-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
30524 // CHECK16-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
30525 // CHECK16:       simd.if.then85:
30526 // CHECK16-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
30527 // CHECK16-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
30528 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
30529 // CHECK16:       omp.inner.for.cond88:
30530 // CHECK16-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
30531 // CHECK16-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !13
30532 // CHECK16-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
30533 // CHECK16-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK16:       omp.inner.for.body90:
// CHECK16-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
// CHECK16-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
// CHECK16-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds double, double* [[TMP62]], i32 [[TMP63]]
// CHECK16-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX93]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds double, double* [[TMP65]], i32 [[TMP66]]
// CHECK16-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX94]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[ADD95:%.*]] = fadd double [[TMP64]], [[TMP67]]
// CHECK16-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds double, double* [[TMP68]], i32 [[TMP69]]
// CHECK16-NEXT:    store double [[ADD95]], double* [[ARRAYIDX96]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK16:       omp.body.continue97:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK16:       omp.inner.for.inc98:
// CHECK16-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
// CHECK16-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK16:       omp.inner.for.end100:
// CHECK16-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK16-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
// CHECK16-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
// CHECK16-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
// CHECK16-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
// CHECK16-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END105]]
// CHECK16:       simd.if.end105:
// CHECK16-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
// CHECK16-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
// CHECK16-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
// CHECK16-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
// CHECK16-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
// CHECK16-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
// CHECK16-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
// CHECK16-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I115]], align 4
// CHECK16-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
// CHECK16-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
// CHECK16:       simd.if.then117:
// CHECK16-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
// CHECK16-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
// CHECK16:       omp.inner.for.cond120:
// CHECK16-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
// CHECK16-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
// CHECK16:       omp.inner.for.body122:
// CHECK16-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
// CHECK16-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
// CHECK16-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds double, double* [[TMP81]], i32 [[TMP82]]
// CHECK16-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX125]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds double, double* [[TMP84]], i32 [[TMP85]]
// CHECK16-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX126]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ADD127:%.*]] = fadd double [[TMP83]], [[TMP86]]
// CHECK16-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds double, double* [[TMP87]], i32 [[TMP88]]
// CHECK16-NEXT:    store double [[ADD127]], double* [[ARRAYIDX128]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
// CHECK16:       omp.body.continue129:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
// CHECK16:       omp.inner.for.inc130:
// CHECK16-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
// CHECK16-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK16:       omp.inner.for.end132:
// CHECK16-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
// CHECK16-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
// CHECK16-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
// CHECK16-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
// CHECK16-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END137]]
// CHECK16:       simd.if.end137:
// CHECK16-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
// CHECK16-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
// CHECK16-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
// CHECK16-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
// CHECK16-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK16-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I146]], align 4
// CHECK16-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
// CHECK16-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
// CHECK16:       simd.if.then148:
// CHECK16-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
// CHECK16-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
// CHECK16:       omp.inner.for.cond151:
// CHECK16-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
// CHECK16-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
// CHECK16:       omp.inner.for.body153:
// CHECK16-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
// CHECK16-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
// CHECK16-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds double, double* [[TMP99]], i32 [[TMP100]]
// CHECK16-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX156]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds double, double* [[TMP102]], i32 [[TMP103]]
// CHECK16-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX157]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[ADD158:%.*]] = fadd double [[TMP101]], [[TMP104]]
// CHECK16-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds double, double* [[TMP105]], i32 [[TMP106]]
// CHECK16-NEXT:    store double [[ADD158]], double* [[ARRAYIDX159]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
// CHECK16:       omp.body.continue160:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
// CHECK16:       omp.inner.for.inc161:
// CHECK16-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
// CHECK16-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK16:       omp.inner.for.end163:
// CHECK16-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
// CHECK16-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
// CHECK16-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
// CHECK16-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
// CHECK16-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END168]]
// CHECK16:       simd.if.end168:
// CHECK16-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
// CHECK16-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
// CHECK16-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
// CHECK16-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
// CHECK16-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
// CHECK16-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
// CHECK16-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK16-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I178]], align 4
// CHECK16-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
// CHECK16-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
// CHECK16:       simd.if.then180:
// CHECK16-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
// CHECK16-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
// CHECK16:       omp.inner.for.cond183:
// CHECK16-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
// CHECK16-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
// CHECK16:       omp.inner.for.body185:
// CHECK16-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
// CHECK16-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
// CHECK16-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds double, double* [[TMP118]], i32 [[TMP119]]
// CHECK16-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX188]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds double, double* [[TMP121]], i32 [[TMP122]]
// CHECK16-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX189]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[ADD190:%.*]] = fadd double [[TMP120]], [[TMP123]]
// CHECK16-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds double, double* [[TMP124]], i32 [[TMP125]]
// CHECK16-NEXT:    store double [[ADD190]], double* [[ARRAYIDX191]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
// CHECK16:       omp.body.continue192:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
// CHECK16:       omp.inner.for.inc193:
// CHECK16-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
// CHECK16-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK16:       omp.inner.for.end195:
// CHECK16-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK16-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
// CHECK16-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
// CHECK16-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
// CHECK16-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END200]]
// CHECK16:       simd.if.end200:
// CHECK16-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
// CHECK16-NEXT:    ret i32 [[CALL]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK16-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK16-NEXT:  entry:
// CHECK16-NEXT:    [[A:%.*]] = alloca i32*, align 4
// CHECK16-NEXT:    [[B:%.*]] = alloca i32*, align 4
// CHECK16-NEXT:    [[C:%.*]] = alloca i32*, align 4
// CHECK16-NEXT:    [[N:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[CH:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I21:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I25:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I52:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I56:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I83:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I87:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I115:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I119:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I146:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I150:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I178:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I182:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    store i32 10000, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 100, i32* [[CH]], align 4
// CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
// CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK16-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK16:       simd.if.then:
// CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16:       omp.inner.for.cond:
// CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK16-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16:       omp.inner.for.body:
// CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 [[TMP9]]
// CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 [[TMP12]]
// CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
// CHECK16-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i32 [[TMP15]]
// CHECK16-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16:       omp.body.continue:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16:       omp.inner.for.inc:
// CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK16:       omp.inner.for.end:
// CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
// CHECK16-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK16-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK16-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK16-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END]]
// CHECK16:       simd.if.end:
// CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK16-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
// CHECK16-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
// CHECK16-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
// CHECK16-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
// CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
// CHECK16-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I21]], align 4
// CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK16-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
// CHECK16-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
// CHECK16:       simd.if.then23:
// CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
// CHECK16-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
// CHECK16:       omp.inner.for.cond26:
// CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
// CHECK16-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
// CHECK16:       omp.inner.for.body28:
// CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
// CHECK16-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
// CHECK16-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
// CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX31]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
// CHECK16-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
// CHECK16-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 [[TMP33]]
// CHECK16-NEXT:    store i32 [[ADD33]], i32* [[ARRAYIDX34]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
// CHECK16:       omp.body.continue35:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
// CHECK16:       omp.inner.for.inc36:
// CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
// CHECK16-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK16:       omp.inner.for.end38:
// CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK16-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
// CHECK16-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
// CHECK16-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
// CHECK16-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
// CHECK16-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END43]]
// CHECK16:       simd.if.end43:
// CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK16-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
// CHECK16-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
// CHECK16-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
// CHECK16-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK16-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I52]], align 4
// CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK16-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
// CHECK16-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
// CHECK16:       simd.if.then54:
// CHECK16-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK16-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
// CHECK16:       omp.inner.for.cond57:
// CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
// CHECK16-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
// CHECK16:       omp.inner.for.body59:
// CHECK16-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
// CHECK16-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
// CHECK16-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 [[TMP45]]
// CHECK16-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i32 [[TMP48]]
// CHECK16-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX63]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
// CHECK16-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i32 [[TMP51]]
// CHECK16-NEXT:    store i32 [[ADD64]], i32* [[ARRAYIDX65]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
// CHECK16:       omp.body.continue66:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
// CHECK16:       omp.inner.for.inc67:
// CHECK16-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
// CHECK16-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK16:       omp.inner.for.end69:
// CHECK16-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK16-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
// CHECK16-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
// CHECK16-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
// CHECK16-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
// CHECK16-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END74]]
// CHECK16:       simd.if.end74:
// CHECK16-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK16-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK16-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
// CHECK16-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
// CHECK16-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
// CHECK16-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
// CHECK16-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
// CHECK16-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I83]], align 4
// CHECK16-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK16-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
// CHECK16-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
// CHECK16:       simd.if.then85:
// CHECK16-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
// CHECK16-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
// CHECK16:       omp.inner.for.cond88:
// CHECK16-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
// CHECK16-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK16:       omp.inner.for.body90:
// CHECK16-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
// CHECK16-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
// CHECK16-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i32 [[TMP63]]
// CHECK16-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX93]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i32 [[TMP66]]
// CHECK16-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX94]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[ADD95:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
// CHECK16-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i32 [[TMP69]]
// CHECK16-NEXT:    store i32 [[ADD95]], i32* [[ARRAYIDX96]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK16:       omp.body.continue97:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK16:       omp.inner.for.inc98:
// CHECK16-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
// CHECK16-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK16:       omp.inner.for.end100:
// CHECK16-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK16-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
// CHECK16-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
// CHECK16-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
// CHECK16-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
// CHECK16-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END105]]
// CHECK16:       simd.if.end105:
// CHECK16-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
// CHECK16-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
// CHECK16-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
// CHECK16-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
// CHECK16-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
// CHECK16-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
// CHECK16-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
// CHECK16-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I115]], align 4
// CHECK16-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
// CHECK16-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
// CHECK16:       simd.if.then117:
// CHECK16-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
// CHECK16-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
// CHECK16:       omp.inner.for.cond120:
// CHECK16-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
// CHECK16-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
// CHECK16:       omp.inner.for.body122:
// CHECK16-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
// CHECK16-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
// CHECK16-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i32 [[TMP82]]
// CHECK16-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX125]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i32 [[TMP85]]
// CHECK16-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX126]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[ADD127:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
// CHECK16-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i32 [[TMP88]]
// CHECK16-NEXT:    store i32 [[ADD127]], i32* [[ARRAYIDX128]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
// CHECK16:       omp.body.continue129:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
// CHECK16:       omp.inner.for.inc130:
// CHECK16-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
// CHECK16-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP38:![0-9]+]]
// CHECK16:       omp.inner.for.end132:
// CHECK16-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK16-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
// CHECK16-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
// CHECK16-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
// CHECK16-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
// CHECK16-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END137]]
// CHECK16:       simd.if.end137:
// CHECK16-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
// CHECK16-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
// CHECK16-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
// CHECK16-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
// CHECK16-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK16-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I146]], align 4
// CHECK16-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
// CHECK16-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
// CHECK16:       simd.if.then148:
// CHECK16-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
// CHECK16-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
// CHECK16:       omp.inner.for.cond151:
// CHECK16-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
// CHECK16-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
// CHECK16:       omp.inner.for.body153:
// CHECK16-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
// CHECK16-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
// CHECK16-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i32 [[TMP100]]
// CHECK16-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX156]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i32 [[TMP103]]
// CHECK16-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX157]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[ADD158:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
// CHECK16-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i32 [[TMP106]]
// CHECK16-NEXT:    store i32 [[ADD158]], i32* [[ARRAYIDX159]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
// CHECK16:       omp.body.continue160:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
// CHECK16:       omp.inner.for.inc161:
// CHECK16-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
// CHECK16-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP41:![0-9]+]]
// CHECK16:       omp.inner.for.end163:
// CHECK16-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK16-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
// CHECK16-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
// CHECK16-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
// CHECK16-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
// CHECK16-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END168]]
// CHECK16:       simd.if.end168:
// CHECK16-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
// CHECK16-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
// CHECK16-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
// CHECK16-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
// CHECK16-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
// CHECK16-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
// CHECK16-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK16-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[I178]], align 4
// CHECK16-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
// CHECK16-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
// CHECK16:       simd.if.then180:
// CHECK16-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
// CHECK16-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
// CHECK16:       omp.inner.for.cond183:
// CHECK16-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
// CHECK16-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
// CHECK16:       omp.inner.for.body185:
// CHECK16-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
// CHECK16-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
// CHECK16-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i32 [[TMP119]]
// CHECK16-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX188]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i32 [[TMP122]]
// CHECK16-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX189]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[ADD190:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
// CHECK16-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i32 [[TMP125]]
// CHECK16-NEXT:    store i32 [[ADD190]], i32* [[ARRAYIDX191]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
// CHECK16:       omp.body.continue192:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
// CHECK16:       omp.inner.for.inc193:
// CHECK16-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
// CHECK16-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP44:![0-9]+]]
// CHECK16:       omp.inner.for.end195:
// CHECK16-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK16-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK16-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
// CHECK16-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
// CHECK16-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
// CHECK16-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
// CHECK16-NEXT:    br label [[SIMD_IF_END200]]
// CHECK16:       simd.if.end200:
// CHECK16-NEXT:    ret i32 0
//