1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test host codegen.
3 // RUN: %clang_cc1 -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
4 // RUN: %clang_cc1 -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
5 // RUN: %clang_cc1 -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
6 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
9 
10 // Test target codegen - host bc file has to be created first.
11 // RUN: %clang_cc1 -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
12 // RUN: %clang_cc1 -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK5
13 // RUN: %clang_cc1 -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
14 // RUN: %clang_cc1 -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
15 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
16 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK7
17 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
18 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
19 
20 // expected-no-diagnostics
21 #ifndef HEADER
22 #define HEADER
23 
24 
mapWithPrivate()25 void mapWithPrivate() {
26   int x, y;
27   #pragma omp target teams private(x) map(x,y) private(y)
28     ;
29 }
30 
mapWithFirstprivate()31 void mapWithFirstprivate() {
32   int x, y;
33   #pragma omp target teams firstprivate(x) map(x,y) firstprivate(y)
34     ;
35 }
36 
mapWithReduction()37 void mapWithReduction() {
38   int x, y;
39   #pragma omp target teams reduction(+:x) map(x,y) reduction(+:y)
40     ;
41 }
42 
mapFrom()43 void mapFrom() {
44   int x;
45   #pragma omp target teams firstprivate(x) map(from:x)
46     ;
47 }
48 
mapTo()49 void mapTo() {
50   int x;
51   #pragma omp target teams firstprivate(x) map(to:x)
52     ;
53 }
54 
mapAlloc()55 void mapAlloc() {
56   int x;
57   #pragma omp target teams firstprivate(x) map(alloc:x)
58     ;
59 }
60 
mapArray()61 void mapArray() {
62   int x[77], y[88], z[99];
63   #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(x,y,z)
64     ;
65   #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(to:x,y,z)
66     ;
67 }
68 
69 # if HAS_INT128
mapInt128()70 void mapInt128() {
71   __int128 x, y, z;
72   #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(x,y,z)
73     ;
74   #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(from:x,y,z)
75     ;
76 }
77 # endif
78 #endif
79 // CHECK1-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev
80 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
81 // CHECK1-NEXT:  entry:
82 // CHECK1-NEXT:    [[X:%.*]] = alloca i32, align 4
83 // CHECK1-NEXT:    [[Y:%.*]] = alloca i32, align 4
84 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
85 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
86 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
87 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
88 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
89 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
90 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
91 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
92 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
93 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
94 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
95 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
96 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
97 // CHECK1-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 8
98 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
99 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
100 // CHECK1-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 8
101 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
102 // CHECK1-NEXT:    store i8* null, i8** [[TMP9]], align 8
103 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
104 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
105 // CHECK1-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
106 // CHECK1-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
107 // CHECK1-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
108 // CHECK1:       omp_offload.failed:
109 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]]
110 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
111 // CHECK1:       omp_offload.cont:
112 // CHECK1-NEXT:    ret void
113 //
114 //
115 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
116 // CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
117 // CHECK1-NEXT:  entry:
118 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
119 // CHECK1-NEXT:    ret void
120 //
121 //
122 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
123 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
124 // CHECK1-NEXT:  entry:
125 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
126 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
127 // CHECK1-NEXT:    [[X:%.*]] = alloca i32, align 4
128 // CHECK1-NEXT:    [[Y:%.*]] = alloca i32, align 4
129 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
130 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
131 // CHECK1-NEXT:    ret void
132 //
133 //
134 // CHECK1-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev
135 // CHECK1-SAME: () #[[ATTR0]] {
136 // CHECK1-NEXT:  entry:
137 // CHECK1-NEXT:    [[X:%.*]] = alloca i32, align 4
138 // CHECK1-NEXT:    [[Y:%.*]] = alloca i32, align 4
139 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
140 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
141 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
142 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
143 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
144 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
145 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
146 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
147 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
148 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
149 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
150 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
151 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
152 // CHECK1-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 8
153 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
154 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
155 // CHECK1-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 8
156 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
157 // CHECK1-NEXT:    store i8* null, i8** [[TMP9]], align 8
158 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
159 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
160 // CHECK1-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
161 // CHECK1-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
162 // CHECK1-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
163 // CHECK1:       omp_offload.failed:
164 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
165 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
166 // CHECK1:       omp_offload.cont:
167 // CHECK1-NEXT:    ret void
168 //
169 //
170 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
171 // CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
172 // CHECK1-NEXT:  entry:
173 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
174 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
175 // CHECK1-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
176 // CHECK1-NEXT:    [[Y_CASTED:%.*]] = alloca i64, align 8
177 // CHECK1-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
178 // CHECK1-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
179 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
180 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
181 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
182 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
183 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
184 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8
185 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
186 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
187 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[CONV1]], align 4
188 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8
189 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]])
190 // CHECK1-NEXT:    ret void
191 //
192 //
193 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
194 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR1]] {
195 // CHECK1-NEXT:  entry:
196 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
197 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
198 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
199 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i64, align 8
200 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
201 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
202 // CHECK1-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
203 // CHECK1-NEXT:    store i64 [[Y]], i64* [[Y_ADDR]], align 8
204 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
205 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
206 // CHECK1-NEXT:    ret void
207 //
208 //
209 // CHECK1-LABEL: define {{[^@]+}}@_Z16mapWithReductionv
210 // CHECK1-SAME: () #[[ATTR0]] {
211 // CHECK1-NEXT:  entry:
212 // CHECK1-NEXT:    [[X:%.*]] = alloca i32, align 4
213 // CHECK1-NEXT:    [[Y:%.*]] = alloca i32, align 4
214 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
215 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
216 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
217 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
218 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
219 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
220 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
221 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
222 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
223 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
224 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
225 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
226 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
227 // CHECK1-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 8
228 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
229 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
230 // CHECK1-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 8
231 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
232 // CHECK1-NEXT:    store i8* null, i8** [[TMP9]], align 8
233 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
234 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
235 // CHECK1-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
236 // CHECK1-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
237 // CHECK1-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
238 // CHECK1:       omp_offload.failed:
239 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
240 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
241 // CHECK1:       omp_offload.cont:
242 // CHECK1-NEXT:    ret void
243 //
244 //
245 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
246 // CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
247 // CHECK1-NEXT:  entry:
248 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
249 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
250 // CHECK1-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
251 // CHECK1-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
252 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
253 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
254 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
255 // CHECK1-NEXT:    ret void
256 //
257 //
258 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
259 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
260 // CHECK1-NEXT:  entry:
261 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
262 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
263 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
264 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
265 // CHECK1-NEXT:    [[X1:%.*]] = alloca i32, align 4
266 // CHECK1-NEXT:    [[Y2:%.*]] = alloca i32, align 4
267 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
268 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
269 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
270 // CHECK1-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
271 // CHECK1-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
272 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
273 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
274 // CHECK1-NEXT:    store i32 0, i32* [[X1]], align 4
275 // CHECK1-NEXT:    store i32 0, i32* [[Y2]], align 4
276 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
277 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
278 // CHECK1-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 8
279 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
280 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
281 // CHECK1-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 8
282 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
283 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
284 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
285 // CHECK1-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
286 // CHECK1-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
287 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
288 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
289 // CHECK1-NEXT:    ]
290 // CHECK1:       .omp.reduction.case1:
291 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
292 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
293 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
294 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
295 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
296 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
297 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
298 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
299 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
300 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
301 // CHECK1:       .omp.reduction.case2:
302 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
303 // CHECK1-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
304 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
305 // CHECK1-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
306 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
307 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
308 // CHECK1:       .omp.reduction.default:
309 // CHECK1-NEXT:    ret void
310 //
311 //
312 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
313 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
314 // CHECK1-NEXT:  entry:
315 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
316 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
317 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
318 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
319 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
320 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
321 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
322 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
323 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
324 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
325 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
326 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
327 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
328 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
329 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
330 // CHECK1-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
331 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
332 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
333 // CHECK1-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
334 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
335 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
336 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
337 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
338 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
339 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
340 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
341 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
342 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
343 // CHECK1-NEXT:    ret void
344 //
345 //
346 // CHECK1-LABEL: define {{[^@]+}}@_Z7mapFromv
347 // CHECK1-SAME: () #[[ATTR0]] {
348 // CHECK1-NEXT:  entry:
349 // CHECK1-NEXT:    [[X:%.*]] = alloca i32, align 4
350 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
351 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
352 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
353 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
354 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
355 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
356 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
357 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
358 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
359 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
360 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
361 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
362 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
363 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
364 // CHECK1-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
365 // CHECK1-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
366 // CHECK1:       omp_offload.failed:
367 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]]
368 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
369 // CHECK1:       omp_offload.cont:
370 // CHECK1-NEXT:    ret void
371 //
372 //
373 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
374 // CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
375 // CHECK1-NEXT:  entry:
376 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
377 // CHECK1-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
378 // CHECK1-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
379 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
380 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
381 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
382 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
383 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
384 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP2]])
385 // CHECK1-NEXT:    ret void
386 //
387 //
388 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
389 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR1]] {
390 // CHECK1-NEXT:  entry:
391 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
392 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
393 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
394 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
395 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
396 // CHECK1-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
397 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
398 // CHECK1-NEXT:    ret void
399 //
400 //
401 // CHECK1-LABEL: define {{[^@]+}}@_Z5mapTov
402 // CHECK1-SAME: () #[[ATTR0]] {
403 // CHECK1-NEXT:  entry:
404 // CHECK1-NEXT:    [[X:%.*]] = alloca i32, align 4
405 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
406 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
407 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
408 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
409 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
410 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
411 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
412 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
413 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
414 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
415 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
416 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
417 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
418 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
419 // CHECK1-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
420 // CHECK1-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
421 // CHECK1:       omp_offload.failed:
422 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]]
423 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
424 // CHECK1:       omp_offload.cont:
425 // CHECK1-NEXT:    ret void
426 //
427 //
428 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
429 // CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
430 // CHECK1-NEXT:  entry:
431 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
432 // CHECK1-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
433 // CHECK1-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
434 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
435 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
436 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
437 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
438 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
439 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP2]])
440 // CHECK1-NEXT:    ret void
441 //
442 //
443 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
444 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR1]] {
445 // CHECK1-NEXT:  entry:
446 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
447 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
448 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
449 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
450 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
451 // CHECK1-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
452 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
453 // CHECK1-NEXT:    ret void
454 //
455 //
456 // CHECK1-LABEL: define {{[^@]+}}@_Z8mapAllocv
457 // CHECK1-SAME: () #[[ATTR0]] {
458 // CHECK1-NEXT:  entry:
459 // CHECK1-NEXT:    [[X:%.*]] = alloca i32, align 4
460 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
461 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
462 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
463 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
464 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
465 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
466 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
467 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
468 // CHECK1-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
469 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
470 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
471 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
472 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
473 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
474 // CHECK1-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
475 // CHECK1-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
476 // CHECK1:       omp_offload.failed:
477 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]]
478 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
479 // CHECK1:       omp_offload.cont:
480 // CHECK1-NEXT:    ret void
481 //
482 //
483 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
484 // CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
485 // CHECK1-NEXT:  entry:
486 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
487 // CHECK1-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
488 // CHECK1-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
489 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
490 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
491 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
492 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
493 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
494 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]])
495 // CHECK1-NEXT:    ret void
496 //
497 //
498 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13
499 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR1]] {
500 // CHECK1-NEXT:  entry:
501 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
502 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
503 // CHECK1-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
504 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
505 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
506 // CHECK1-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
507 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
508 // CHECK1-NEXT:    ret void
509 //
510 //
511 // CHECK1-LABEL: define {{[^@]+}}@_Z8mapArrayv
512 // CHECK1-SAME: () #[[ATTR0]] {
513 // CHECK1-NEXT:  entry:
514 // CHECK1-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
515 // CHECK1-NEXT:    [[Y:%.*]] = alloca [88 x i32], align 4
516 // CHECK1-NEXT:    [[Z:%.*]] = alloca [99 x i32], align 4
517 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
518 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
519 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
520 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8
521 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8
522 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8
523 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
524 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]**
525 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 8
526 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
527 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]**
528 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 8
529 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
530 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
531 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
532 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]**
533 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 8
534 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
535 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]**
536 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 8
537 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
538 // CHECK1-NEXT:    store i8* null, i8** [[TMP9]], align 8
539 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
540 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]**
541 // CHECK1-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 8
542 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
543 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to [77 x i32]**
544 // CHECK1-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 8
545 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
546 // CHECK1-NEXT:    store i8* null, i8** [[TMP14]], align 8
547 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
548 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
549 // CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, i32 3, i8** [[TMP15]], i8** [[TMP16]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
550 // CHECK1-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
551 // CHECK1-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
552 // CHECK1:       omp_offload.failed:
553 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
554 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
555 // CHECK1:       omp_offload.cont:
556 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
557 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [88 x i32]**
558 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP20]], align 8
559 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
560 // CHECK1-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [88 x i32]**
561 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP22]], align 8
562 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
563 // CHECK1-NEXT:    store i8* null, i8** [[TMP23]], align 8
564 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
565 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [99 x i32]**
566 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP25]], align 8
567 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
568 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to [99 x i32]**
569 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP27]], align 8
570 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1
571 // CHECK1-NEXT:    store i8* null, i8** [[TMP28]], align 8
572 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
573 // CHECK1-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [77 x i32]**
574 // CHECK1-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP30]], align 8
575 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
576 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [77 x i32]**
577 // CHECK1-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP32]], align 8
578 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2
579 // CHECK1-NEXT:    store i8* null, i8** [[TMP33]], align 8
580 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
581 // CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
582 // CHECK1-NEXT:    [[TMP36:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, i32 3, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
583 // CHECK1-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
584 // CHECK1-NEXT:    br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]]
585 // CHECK1:       omp_offload.failed4:
586 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
587 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT5]]
588 // CHECK1:       omp_offload.cont5:
589 // CHECK1-NEXT:    ret void
590 //
591 //
592 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
593 // CHECK1-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
594 // CHECK1-NEXT:  entry:
595 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
596 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
597 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
598 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
599 // CHECK1-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
600 // CHECK1-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
601 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
602 // CHECK1-NEXT:    ret void
603 //
604 //
605 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16
606 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
607 // CHECK1-NEXT:  entry:
608 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
609 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
610 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
611 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
612 // CHECK1-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
613 // CHECK1-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
614 // CHECK1-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
615 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
616 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
617 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
618 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
619 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
620 // CHECK1-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
621 // CHECK1-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
622 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
623 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
624 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
625 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
626 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
627 // CHECK1-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
628 // CHECK1-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
629 // CHECK1:       omp.arrayinit.body:
630 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
631 // CHECK1-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
632 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
633 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
634 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
635 // CHECK1:       omp.arrayinit.done:
636 // CHECK1-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
637 // CHECK1-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
638 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
639 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
640 // CHECK1-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
641 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
642 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
643 // CHECK1-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
644 // CHECK1-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var)
645 // CHECK1-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
646 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
647 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
648 // CHECK1-NEXT:    ]
649 // CHECK1:       .omp.reduction.case1:
650 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
651 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
652 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
653 // CHECK1:       omp.arraycpy.body:
654 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
655 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
656 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
657 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
658 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
659 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
660 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
661 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
662 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
663 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
664 // CHECK1:       omp.arraycpy.done6:
665 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
666 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
667 // CHECK1:       .omp.reduction.case2:
668 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
669 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
670 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
671 // CHECK1:       omp.arraycpy.body8:
672 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
673 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
674 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
675 // CHECK1-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
676 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
677 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
678 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
679 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
680 // CHECK1:       omp.arraycpy.done14:
681 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
682 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
683 // CHECK1:       .omp.reduction.default:
684 // CHECK1-NEXT:    ret void
685 //
686 //
687 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17
688 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
689 // CHECK1-NEXT:  entry:
690 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
691 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
692 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
693 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
694 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
695 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
696 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
697 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
698 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
699 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
700 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
701 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
702 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
703 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
704 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
705 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
706 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
707 // CHECK1:       omp.arraycpy.body:
708 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
709 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
710 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
711 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
712 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
713 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
714 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
715 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
716 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
717 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
718 // CHECK1:       omp.arraycpy.done2:
719 // CHECK1-NEXT:    ret void
720 //
721 //
722 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
723 // CHECK1-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
724 // CHECK1-NEXT:  entry:
725 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
726 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
727 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
728 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
729 // CHECK1-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
730 // CHECK1-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
731 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
732 // CHECK1-NEXT:    ret void
733 //
734 //
735 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..20
736 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
737 // CHECK1-NEXT:  entry:
738 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
739 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
740 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
741 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
742 // CHECK1-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
743 // CHECK1-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
744 // CHECK1-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
745 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
746 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
747 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
748 // CHECK1-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
749 // CHECK1-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
750 // CHECK1-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
751 // CHECK1-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
752 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
753 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
754 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
755 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
756 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
757 // CHECK1-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
758 // CHECK1-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
759 // CHECK1:       omp.arrayinit.body:
760 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
761 // CHECK1-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
762 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
763 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
764 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
765 // CHECK1:       omp.arrayinit.done:
766 // CHECK1-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
767 // CHECK1-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
768 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
769 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
770 // CHECK1-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
771 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
772 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
773 // CHECK1-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
774 // CHECK1-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var)
775 // CHECK1-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
776 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
777 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
778 // CHECK1-NEXT:    ]
779 // CHECK1:       .omp.reduction.case1:
780 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
781 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
782 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
783 // CHECK1:       omp.arraycpy.body:
784 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
785 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
786 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
787 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
788 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
789 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
790 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
791 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
792 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
793 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
794 // CHECK1:       omp.arraycpy.done6:
795 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
796 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
797 // CHECK1:       .omp.reduction.case2:
798 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
799 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
800 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
801 // CHECK1:       omp.arraycpy.body8:
802 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
803 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
804 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
805 // CHECK1-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
806 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
807 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
808 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
809 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
810 // CHECK1:       omp.arraycpy.done14:
811 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
812 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
813 // CHECK1:       .omp.reduction.default:
814 // CHECK1-NEXT:    ret void
815 //
816 //
817 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21
818 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
819 // CHECK1-NEXT:  entry:
820 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
821 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
822 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
823 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
824 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
825 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
826 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
827 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
828 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
829 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
830 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
831 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
832 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
833 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
834 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
835 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
836 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
837 // CHECK1:       omp.arraycpy.body:
838 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
839 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
840 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
841 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
842 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
843 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
844 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
845 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
846 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
847 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
848 // CHECK1:       omp.arraycpy.done2:
849 // CHECK1-NEXT:    ret void
850 //
851 //
852 // CHECK1-LABEL: define {{[^@]+}}@_Z9mapInt128v
853 // CHECK1-SAME: () #[[ATTR0]] {
854 // CHECK1-NEXT:  entry:
855 // CHECK1-NEXT:    [[X:%.*]] = alloca i128, align 16
856 // CHECK1-NEXT:    [[Y:%.*]] = alloca i128, align 16
857 // CHECK1-NEXT:    [[Z:%.*]] = alloca i128, align 16
858 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
859 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
860 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
861 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8
862 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8
863 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8
864 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
865 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i128**
866 // CHECK1-NEXT:    store i128* [[Y]], i128** [[TMP1]], align 8
867 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
868 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i128**
869 // CHECK1-NEXT:    store i128* [[Y]], i128** [[TMP3]], align 8
870 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
871 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
872 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
873 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i128**
874 // CHECK1-NEXT:    store i128* [[Z]], i128** [[TMP6]], align 8
875 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
876 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i128**
877 // CHECK1-NEXT:    store i128* [[Z]], i128** [[TMP8]], align 8
878 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
879 // CHECK1-NEXT:    store i8* null, i8** [[TMP9]], align 8
880 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
881 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i128**
882 // CHECK1-NEXT:    store i128* [[X]], i128** [[TMP11]], align 8
883 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
884 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i128**
885 // CHECK1-NEXT:    store i128* [[X]], i128** [[TMP13]], align 8
886 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
887 // CHECK1-NEXT:    store i8* null, i8** [[TMP14]], align 8
888 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
889 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
890 // CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72.region_id, i32 3, i8** [[TMP15]], i8** [[TMP16]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.26, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.27, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
891 // CHECK1-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
892 // CHECK1-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
893 // CHECK1:       omp_offload.failed:
894 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72(i128* [[Y]], i128* [[Z]]) #[[ATTR2]]
895 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
896 // CHECK1:       omp_offload.cont:
897 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
898 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i128**
899 // CHECK1-NEXT:    store i128* [[Y]], i128** [[TMP20]], align 8
900 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
901 // CHECK1-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i128**
902 // CHECK1-NEXT:    store i128* [[Y]], i128** [[TMP22]], align 8
903 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
904 // CHECK1-NEXT:    store i8* null, i8** [[TMP23]], align 8
905 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
906 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i128**
907 // CHECK1-NEXT:    store i128* [[Z]], i128** [[TMP25]], align 8
908 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
909 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i128**
910 // CHECK1-NEXT:    store i128* [[Z]], i128** [[TMP27]], align 8
911 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1
912 // CHECK1-NEXT:    store i8* null, i8** [[TMP28]], align 8
913 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
914 // CHECK1-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i128**
915 // CHECK1-NEXT:    store i128* [[X]], i128** [[TMP30]], align 8
916 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
917 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i128**
918 // CHECK1-NEXT:    store i128* [[X]], i128** [[TMP32]], align 8
919 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2
920 // CHECK1-NEXT:    store i8* null, i8** [[TMP33]], align 8
921 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
922 // CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
923 // CHECK1-NEXT:    [[TMP36:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74.region_id, i32 3, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
924 // CHECK1-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
925 // CHECK1-NEXT:    br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]]
926 // CHECK1:       omp_offload.failed4:
927 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74(i128* [[Y]], i128* [[Z]]) #[[ATTR2]]
928 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT5]]
929 // CHECK1:       omp_offload.cont5:
930 // CHECK1-NEXT:    ret void
931 //
932 //
933 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72
934 // CHECK1-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
935 // CHECK1-NEXT:  entry:
936 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
937 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
938 // CHECK1-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
939 // CHECK1-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
940 // CHECK1-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
941 // CHECK1-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
942 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
943 // CHECK1-NEXT:    ret void
944 //
945 //
946 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..24
947 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
948 // CHECK1-NEXT:  entry:
949 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
950 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
951 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
952 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
953 // CHECK1-NEXT:    [[Y1:%.*]] = alloca i128, align 16
954 // CHECK1-NEXT:    [[X:%.*]] = alloca i128, align 16
955 // CHECK1-NEXT:    [[Z2:%.*]] = alloca i128, align 16
956 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
957 // CHECK1-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
958 // CHECK1-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
959 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i128, align 16
960 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
961 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
962 // CHECK1-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
963 // CHECK1-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
964 // CHECK1-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
965 // CHECK1-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
966 // CHECK1-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
967 // CHECK1-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
968 // CHECK1-NEXT:    store i128 0, i128* [[Z2]], align 16
969 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
970 // CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
971 // CHECK1-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
972 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
973 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
974 // CHECK1-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
975 // CHECK1-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.25, [8 x i32]* @.gomp_critical_user_.reduction.var)
976 // CHECK1-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
977 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
978 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
979 // CHECK1-NEXT:    ]
980 // CHECK1:       .omp.reduction.case1:
981 // CHECK1-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
982 // CHECK1-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
983 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
984 // CHECK1-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
985 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
986 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
987 // CHECK1:       .omp.reduction.case2:
988 // CHECK1-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
989 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
990 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
991 // CHECK1-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0)
992 // CHECK1-NEXT:    br label [[ATOMIC_CONT:%.*]]
993 // CHECK1:       atomic_cont:
994 // CHECK1-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
995 // CHECK1-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
996 // CHECK1-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
997 // CHECK1-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
998 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
999 // CHECK1-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
1000 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
1001 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
1002 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
1003 // CHECK1-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0)
1004 // CHECK1-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
1005 // CHECK1:       atomic_exit:
1006 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1007 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1008 // CHECK1:       .omp.reduction.default:
1009 // CHECK1-NEXT:    ret void
1010 //
1011 //
1012 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.25
1013 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1014 // CHECK1-NEXT:  entry:
1015 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1016 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1017 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1018 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1019 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1020 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1021 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1022 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1023 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1024 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1025 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
1026 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1027 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1028 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
1029 // CHECK1-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
1030 // CHECK1-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
1031 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
1032 // CHECK1-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
1033 // CHECK1-NEXT:    ret void
1034 //
1035 //
1036 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74
1037 // CHECK1-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
1038 // CHECK1-NEXT:  entry:
1039 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
1040 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
1041 // CHECK1-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
1042 // CHECK1-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
1043 // CHECK1-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
1044 // CHECK1-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
1045 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..28 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
1046 // CHECK1-NEXT:    ret void
1047 //
1048 //
1049 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..28
1050 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
1051 // CHECK1-NEXT:  entry:
1052 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1053 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1054 // CHECK1-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
1055 // CHECK1-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
1056 // CHECK1-NEXT:    [[Y1:%.*]] = alloca i128, align 16
1057 // CHECK1-NEXT:    [[X:%.*]] = alloca i128, align 16
1058 // CHECK1-NEXT:    [[Z2:%.*]] = alloca i128, align 16
1059 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1060 // CHECK1-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
1061 // CHECK1-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
1062 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i128, align 16
1063 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1064 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1065 // CHECK1-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
1066 // CHECK1-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
1067 // CHECK1-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
1068 // CHECK1-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
1069 // CHECK1-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
1070 // CHECK1-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
1071 // CHECK1-NEXT:    store i128 0, i128* [[Z2]], align 16
1072 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1073 // CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
1074 // CHECK1-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
1075 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1076 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1077 // CHECK1-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1078 // CHECK1-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.29, [8 x i32]* @.gomp_critical_user_.reduction.var)
1079 // CHECK1-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1080 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1081 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1082 // CHECK1-NEXT:    ]
1083 // CHECK1:       .omp.reduction.case1:
1084 // CHECK1-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
1085 // CHECK1-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
1086 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
1087 // CHECK1-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
1088 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1089 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1090 // CHECK1:       .omp.reduction.case2:
1091 // CHECK1-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
1092 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
1093 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
1094 // CHECK1-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0)
1095 // CHECK1-NEXT:    br label [[ATOMIC_CONT:%.*]]
1096 // CHECK1:       atomic_cont:
1097 // CHECK1-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
1098 // CHECK1-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
1099 // CHECK1-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
1100 // CHECK1-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
1101 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
1102 // CHECK1-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
1103 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
1104 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
1105 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
1106 // CHECK1-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0)
1107 // CHECK1-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
1108 // CHECK1:       atomic_exit:
1109 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1110 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1111 // CHECK1:       .omp.reduction.default:
1112 // CHECK1-NEXT:    ret void
1113 //
1114 //
1115 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.29
1116 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1117 // CHECK1-NEXT:  entry:
1118 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1119 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1120 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1121 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1122 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1123 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1124 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1125 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1126 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1127 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1128 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
1129 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1130 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1131 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
1132 // CHECK1-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
1133 // CHECK1-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
1134 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
1135 // CHECK1-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
1136 // CHECK1-NEXT:    ret void
1137 //
1138 //
1139 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1140 // CHECK1-SAME: () #[[ATTR7:[0-9]+]] {
1141 // CHECK1-NEXT:  entry:
1142 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
1143 // CHECK1-NEXT:    ret void
1144 //
1145 //
1146 // CHECK2-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev
1147 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
1148 // CHECK2-NEXT:  entry:
1149 // CHECK2-NEXT:    [[X:%.*]] = alloca i32, align 4
1150 // CHECK2-NEXT:    [[Y:%.*]] = alloca i32, align 4
1151 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
1152 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
1153 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
1154 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1155 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1156 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
1157 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1158 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1159 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
1160 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1161 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1162 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1163 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
1164 // CHECK2-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 8
1165 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1166 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
1167 // CHECK2-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 8
1168 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1169 // CHECK2-NEXT:    store i8* null, i8** [[TMP9]], align 8
1170 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1171 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1172 // CHECK2-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1173 // CHECK2-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
1174 // CHECK2-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1175 // CHECK2:       omp_offload.failed:
1176 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]]
1177 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1178 // CHECK2:       omp_offload.cont:
1179 // CHECK2-NEXT:    ret void
1180 //
1181 //
1182 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
1183 // CHECK2-SAME: () #[[ATTR1:[0-9]+]] {
1184 // CHECK2-NEXT:  entry:
1185 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1186 // CHECK2-NEXT:    ret void
1187 //
1188 //
1189 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
1190 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
1191 // CHECK2-NEXT:  entry:
1192 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1193 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1194 // CHECK2-NEXT:    [[X:%.*]] = alloca i32, align 4
1195 // CHECK2-NEXT:    [[Y:%.*]] = alloca i32, align 4
1196 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1197 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1198 // CHECK2-NEXT:    ret void
1199 //
1200 //
1201 // CHECK2-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev
1202 // CHECK2-SAME: () #[[ATTR0]] {
1203 // CHECK2-NEXT:  entry:
1204 // CHECK2-NEXT:    [[X:%.*]] = alloca i32, align 4
1205 // CHECK2-NEXT:    [[Y:%.*]] = alloca i32, align 4
1206 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
1207 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
1208 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
1209 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1210 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1211 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
1212 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1213 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1214 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
1215 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1216 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1217 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1218 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
1219 // CHECK2-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 8
1220 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1221 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
1222 // CHECK2-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 8
1223 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1224 // CHECK2-NEXT:    store i8* null, i8** [[TMP9]], align 8
1225 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1226 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1227 // CHECK2-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1228 // CHECK2-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
1229 // CHECK2-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1230 // CHECK2:       omp_offload.failed:
1231 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
1232 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1233 // CHECK2:       omp_offload.cont:
1234 // CHECK2-NEXT:    ret void
1235 //
1236 //
1237 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
1238 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
1239 // CHECK2-NEXT:  entry:
1240 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
1241 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
1242 // CHECK2-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
1243 // CHECK2-NEXT:    [[Y_CASTED:%.*]] = alloca i64, align 8
1244 // CHECK2-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
1245 // CHECK2-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
1246 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
1247 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
1248 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
1249 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
1250 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
1251 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8
1252 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
1253 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
1254 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[CONV1]], align 4
1255 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8
1256 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]])
1257 // CHECK2-NEXT:    ret void
1258 //
1259 //
1260 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
1261 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR1]] {
1262 // CHECK2-NEXT:  entry:
1263 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1264 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1265 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
1266 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i64, align 8
1267 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1268 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1269 // CHECK2-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
1270 // CHECK2-NEXT:    store i64 [[Y]], i64* [[Y_ADDR]], align 8
1271 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
1272 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
1273 // CHECK2-NEXT:    ret void
1274 //
1275 //
1276 // CHECK2-LABEL: define {{[^@]+}}@_Z16mapWithReductionv
1277 // CHECK2-SAME: () #[[ATTR0]] {
1278 // CHECK2-NEXT:  entry:
1279 // CHECK2-NEXT:    [[X:%.*]] = alloca i32, align 4
1280 // CHECK2-NEXT:    [[Y:%.*]] = alloca i32, align 4
1281 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
1282 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
1283 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
1284 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1285 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1286 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
1287 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1288 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1289 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
1290 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1291 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1292 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1293 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
1294 // CHECK2-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 8
1295 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1296 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
1297 // CHECK2-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 8
1298 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1299 // CHECK2-NEXT:    store i8* null, i8** [[TMP9]], align 8
1300 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1301 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1302 // CHECK2-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1303 // CHECK2-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
1304 // CHECK2-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1305 // CHECK2:       omp_offload.failed:
1306 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
1307 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1308 // CHECK2:       omp_offload.cont:
1309 // CHECK2-NEXT:    ret void
1310 //
1311 //
1312 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
1313 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
1314 // CHECK2-NEXT:  entry:
1315 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
1316 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
1317 // CHECK2-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
1318 // CHECK2-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
1319 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
1320 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
1321 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
1322 // CHECK2-NEXT:    ret void
1323 //
1324 //
1325 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
1326 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
1327 // CHECK2-NEXT:  entry:
1328 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1329 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1330 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
1331 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
1332 // CHECK2-NEXT:    [[X1:%.*]] = alloca i32, align 4
1333 // CHECK2-NEXT:    [[Y2:%.*]] = alloca i32, align 4
1334 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
1335 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1336 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1337 // CHECK2-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
1338 // CHECK2-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
1339 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
1340 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
1341 // CHECK2-NEXT:    store i32 0, i32* [[X1]], align 4
1342 // CHECK2-NEXT:    store i32 0, i32* [[Y2]], align 4
1343 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1344 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
1345 // CHECK2-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 8
1346 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
1347 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
1348 // CHECK2-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 8
1349 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1350 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
1351 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1352 // CHECK2-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1353 // CHECK2-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1354 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1355 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1356 // CHECK2-NEXT:    ]
1357 // CHECK2:       .omp.reduction.case1:
1358 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
1359 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
1360 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
1361 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
1362 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
1363 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
1364 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1365 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
1366 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1367 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1368 // CHECK2:       .omp.reduction.case2:
1369 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
1370 // CHECK2-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
1371 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
1372 // CHECK2-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
1373 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1374 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1375 // CHECK2:       .omp.reduction.default:
1376 // CHECK2-NEXT:    ret void
1377 //
1378 //
1379 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
1380 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
1381 // CHECK2-NEXT:  entry:
1382 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1383 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1384 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1385 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1386 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1387 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
1388 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1389 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
1390 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
1391 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1392 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1393 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
1394 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1395 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1396 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
1397 // CHECK2-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
1398 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
1399 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
1400 // CHECK2-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
1401 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
1402 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
1403 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
1404 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
1405 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1406 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
1407 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
1408 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
1409 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
1410 // CHECK2-NEXT:    ret void
1411 //
1412 //
1413 // CHECK2-LABEL: define {{[^@]+}}@_Z7mapFromv
1414 // CHECK2-SAME: () #[[ATTR0]] {
1415 // CHECK2-NEXT:  entry:
1416 // CHECK2-NEXT:    [[X:%.*]] = alloca i32, align 4
1417 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
1418 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
1419 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
1420 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1421 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1422 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
1423 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1424 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1425 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
1426 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1427 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1428 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1429 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1430 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1431 // CHECK2-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1432 // CHECK2-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1433 // CHECK2:       omp_offload.failed:
1434 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]]
1435 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1436 // CHECK2:       omp_offload.cont:
1437 // CHECK2-NEXT:    ret void
1438 //
1439 //
1440 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
1441 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
1442 // CHECK2-NEXT:  entry:
1443 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
1444 // CHECK2-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
1445 // CHECK2-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
1446 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
1447 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1448 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
1449 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
1450 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
1451 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP2]])
1452 // CHECK2-NEXT:    ret void
1453 //
1454 //
1455 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
1456 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR1]] {
1457 // CHECK2-NEXT:  entry:
1458 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1459 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1460 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
1461 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1462 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1463 // CHECK2-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
1464 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
1465 // CHECK2-NEXT:    ret void
1466 //
1467 //
1468 // CHECK2-LABEL: define {{[^@]+}}@_Z5mapTov
1469 // CHECK2-SAME: () #[[ATTR0]] {
1470 // CHECK2-NEXT:  entry:
1471 // CHECK2-NEXT:    [[X:%.*]] = alloca i32, align 4
1472 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
1473 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
1474 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
1475 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1476 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1477 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
1478 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1479 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1480 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
1481 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1482 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1483 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1484 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1485 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1486 // CHECK2-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1487 // CHECK2-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1488 // CHECK2:       omp_offload.failed:
1489 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]]
1490 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1491 // CHECK2:       omp_offload.cont:
1492 // CHECK2-NEXT:    ret void
1493 //
1494 //
1495 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
1496 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
1497 // CHECK2-NEXT:  entry:
1498 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
1499 // CHECK2-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
1500 // CHECK2-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
1501 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
1502 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1503 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
1504 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
1505 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
1506 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP2]])
1507 // CHECK2-NEXT:    ret void
1508 //
1509 //
1510 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
1511 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR1]] {
1512 // CHECK2-NEXT:  entry:
1513 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1514 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1515 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
1516 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1517 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1518 // CHECK2-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
1519 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
1520 // CHECK2-NEXT:    ret void
1521 //
1522 //
1523 // CHECK2-LABEL: define {{[^@]+}}@_Z8mapAllocv
1524 // CHECK2-SAME: () #[[ATTR0]] {
1525 // CHECK2-NEXT:  entry:
1526 // CHECK2-NEXT:    [[X:%.*]] = alloca i32, align 4
1527 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
1528 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
1529 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
1530 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1531 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
1532 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP1]], align 8
1533 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1534 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
1535 // CHECK2-NEXT:    store i32* [[X]], i32** [[TMP3]], align 8
1536 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1537 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1538 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1539 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1540 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1541 // CHECK2-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1542 // CHECK2-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1543 // CHECK2:       omp_offload.failed:
1544 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]]
1545 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1546 // CHECK2:       omp_offload.cont:
1547 // CHECK2-NEXT:    ret void
1548 //
1549 //
1550 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
1551 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
1552 // CHECK2-NEXT:  entry:
1553 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
1554 // CHECK2-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
1555 // CHECK2-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
1556 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
1557 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1558 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
1559 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
1560 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
1561 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]])
1562 // CHECK2-NEXT:    ret void
1563 //
1564 //
1565 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13
1566 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR1]] {
1567 // CHECK2-NEXT:  entry:
1568 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1569 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1570 // CHECK2-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
1571 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1572 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1573 // CHECK2-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
1574 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
1575 // CHECK2-NEXT:    ret void
1576 //
1577 //
1578 // CHECK2-LABEL: define {{[^@]+}}@_Z8mapArrayv
1579 // CHECK2-SAME: () #[[ATTR0]] {
1580 // CHECK2-NEXT:  entry:
1581 // CHECK2-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
1582 // CHECK2-NEXT:    [[Y:%.*]] = alloca [88 x i32], align 4
1583 // CHECK2-NEXT:    [[Z:%.*]] = alloca [99 x i32], align 4
1584 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
1585 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
1586 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
1587 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8
1588 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8
1589 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8
1590 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1591 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]**
1592 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 8
1593 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1594 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]**
1595 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 8
1596 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1597 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1598 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1599 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]**
1600 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 8
1601 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1602 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]**
1603 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 8
1604 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1605 // CHECK2-NEXT:    store i8* null, i8** [[TMP9]], align 8
1606 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1607 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]**
1608 // CHECK2-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 8
1609 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1610 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to [77 x i32]**
1611 // CHECK2-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 8
1612 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1613 // CHECK2-NEXT:    store i8* null, i8** [[TMP14]], align 8
1614 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1615 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1616 // CHECK2-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, i32 3, i8** [[TMP15]], i8** [[TMP16]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1617 // CHECK2-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
1618 // CHECK2-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1619 // CHECK2:       omp_offload.failed:
1620 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
1621 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1622 // CHECK2:       omp_offload.cont:
1623 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
1624 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [88 x i32]**
1625 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP20]], align 8
1626 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
1627 // CHECK2-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [88 x i32]**
1628 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP22]], align 8
1629 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
1630 // CHECK2-NEXT:    store i8* null, i8** [[TMP23]], align 8
1631 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
1632 // CHECK2-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [99 x i32]**
1633 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP25]], align 8
1634 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
1635 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to [99 x i32]**
1636 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP27]], align 8
1637 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1
1638 // CHECK2-NEXT:    store i8* null, i8** [[TMP28]], align 8
1639 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
1640 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [77 x i32]**
1641 // CHECK2-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP30]], align 8
1642 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
1643 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [77 x i32]**
1644 // CHECK2-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP32]], align 8
1645 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2
1646 // CHECK2-NEXT:    store i8* null, i8** [[TMP33]], align 8
1647 // CHECK2-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
1648 // CHECK2-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
1649 // CHECK2-NEXT:    [[TMP36:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, i32 3, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1650 // CHECK2-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
1651 // CHECK2-NEXT:    br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]]
1652 // CHECK2:       omp_offload.failed4:
1653 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
1654 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT5]]
1655 // CHECK2:       omp_offload.cont5:
1656 // CHECK2-NEXT:    ret void
1657 //
1658 //
1659 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
1660 // CHECK2-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
1661 // CHECK2-NEXT:  entry:
1662 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
1663 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
1664 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
1665 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
1666 // CHECK2-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
1667 // CHECK2-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
1668 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
1669 // CHECK2-NEXT:    ret void
1670 //
1671 //
1672 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16
1673 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
1674 // CHECK2-NEXT:  entry:
1675 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1676 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1677 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
1678 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
1679 // CHECK2-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
1680 // CHECK2-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
1681 // CHECK2-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
1682 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1683 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1684 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1685 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
1686 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
1687 // CHECK2-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
1688 // CHECK2-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
1689 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
1690 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
1691 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
1692 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
1693 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
1694 // CHECK2-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
1695 // CHECK2-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
1696 // CHECK2:       omp.arrayinit.body:
1697 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
1698 // CHECK2-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
1699 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1700 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
1701 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
1702 // CHECK2:       omp.arrayinit.done:
1703 // CHECK2-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
1704 // CHECK2-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
1705 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1706 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
1707 // CHECK2-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
1708 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1709 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
1710 // CHECK2-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1711 // CHECK2-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var)
1712 // CHECK2-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1713 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1714 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1715 // CHECK2-NEXT:    ]
1716 // CHECK2:       .omp.reduction.case1:
1717 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
1718 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
1719 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1720 // CHECK2:       omp.arraycpy.body:
1721 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1722 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1723 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
1724 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
1725 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1726 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
1727 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
1728 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1729 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
1730 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
1731 // CHECK2:       omp.arraycpy.done6:
1732 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1733 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1734 // CHECK2:       .omp.reduction.case2:
1735 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
1736 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
1737 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
1738 // CHECK2:       omp.arraycpy.body8:
1739 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
1740 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
1741 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
1742 // CHECK2-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
1743 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
1744 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
1745 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
1746 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
1747 // CHECK2:       omp.arraycpy.done14:
1748 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1749 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1750 // CHECK2:       .omp.reduction.default:
1751 // CHECK2-NEXT:    ret void
1752 //
1753 //
1754 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17
1755 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1756 // CHECK2-NEXT:  entry:
1757 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1758 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1759 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1760 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1761 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1762 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1763 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1764 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1765 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1766 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1767 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1768 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1769 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1770 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1771 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
1772 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
1773 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1774 // CHECK2:       omp.arraycpy.body:
1775 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1776 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1777 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
1778 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
1779 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
1780 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
1781 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1782 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1783 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
1784 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
1785 // CHECK2:       omp.arraycpy.done2:
1786 // CHECK2-NEXT:    ret void
1787 //
1788 //
1789 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
1790 // CHECK2-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
1791 // CHECK2-NEXT:  entry:
1792 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
1793 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
1794 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
1795 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
1796 // CHECK2-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
1797 // CHECK2-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
1798 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
1799 // CHECK2-NEXT:    ret void
1800 //
1801 //
1802 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..20
1803 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
1804 // CHECK2-NEXT:  entry:
1805 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1806 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1807 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
1808 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
1809 // CHECK2-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
1810 // CHECK2-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
1811 // CHECK2-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
1812 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1813 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1814 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1815 // CHECK2-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
1816 // CHECK2-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
1817 // CHECK2-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
1818 // CHECK2-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
1819 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
1820 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
1821 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
1822 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
1823 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
1824 // CHECK2-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
1825 // CHECK2-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
1826 // CHECK2:       omp.arrayinit.body:
1827 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
1828 // CHECK2-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
1829 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1830 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
1831 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
1832 // CHECK2:       omp.arrayinit.done:
1833 // CHECK2-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
1834 // CHECK2-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
1835 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1836 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
1837 // CHECK2-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
1838 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1839 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
1840 // CHECK2-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1841 // CHECK2-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var)
1842 // CHECK2-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1843 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1844 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1845 // CHECK2-NEXT:    ]
1846 // CHECK2:       .omp.reduction.case1:
1847 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
1848 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
1849 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1850 // CHECK2:       omp.arraycpy.body:
1851 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1852 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1853 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
1854 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
1855 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1856 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
1857 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
1858 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1859 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
1860 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
1861 // CHECK2:       omp.arraycpy.done6:
1862 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1863 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1864 // CHECK2:       .omp.reduction.case2:
1865 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
1866 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
1867 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
1868 // CHECK2:       omp.arraycpy.body8:
1869 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
1870 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
1871 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
1872 // CHECK2-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
1873 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
1874 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
1875 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
1876 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
1877 // CHECK2:       omp.arraycpy.done14:
1878 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1879 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1880 // CHECK2:       .omp.reduction.default:
1881 // CHECK2-NEXT:    ret void
1882 //
1883 //
1884 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21
1885 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
1886 // CHECK2-NEXT:  entry:
1887 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1888 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1889 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1890 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1891 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1892 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1893 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1894 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1895 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1896 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1897 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1898 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1899 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1900 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1901 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
1902 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
1903 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1904 // CHECK2:       omp.arraycpy.body:
1905 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1906 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1907 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
1908 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
1909 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
1910 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
1911 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1912 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1913 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
1914 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
1915 // CHECK2:       omp.arraycpy.done2:
1916 // CHECK2-NEXT:    ret void
1917 //
1918 //
1919 // CHECK2-LABEL: define {{[^@]+}}@_Z9mapInt128v
1920 // CHECK2-SAME: () #[[ATTR0]] {
1921 // CHECK2-NEXT:  entry:
1922 // CHECK2-NEXT:    [[X:%.*]] = alloca i128, align 16
1923 // CHECK2-NEXT:    [[Y:%.*]] = alloca i128, align 16
1924 // CHECK2-NEXT:    [[Z:%.*]] = alloca i128, align 16
1925 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
1926 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
1927 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
1928 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8
1929 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8
1930 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8
1931 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1932 // CHECK2-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i128**
1933 // CHECK2-NEXT:    store i128* [[Y]], i128** [[TMP1]], align 8
1934 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1935 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i128**
1936 // CHECK2-NEXT:    store i128* [[Y]], i128** [[TMP3]], align 8
1937 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1938 // CHECK2-NEXT:    store i8* null, i8** [[TMP4]], align 8
1939 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1940 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i128**
1941 // CHECK2-NEXT:    store i128* [[Z]], i128** [[TMP6]], align 8
1942 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1943 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i128**
1944 // CHECK2-NEXT:    store i128* [[Z]], i128** [[TMP8]], align 8
1945 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1946 // CHECK2-NEXT:    store i8* null, i8** [[TMP9]], align 8
1947 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1948 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i128**
1949 // CHECK2-NEXT:    store i128* [[X]], i128** [[TMP11]], align 8
1950 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1951 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i128**
1952 // CHECK2-NEXT:    store i128* [[X]], i128** [[TMP13]], align 8
1953 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1954 // CHECK2-NEXT:    store i8* null, i8** [[TMP14]], align 8
1955 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1956 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1957 // CHECK2-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72.region_id, i32 3, i8** [[TMP15]], i8** [[TMP16]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.26, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.27, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1958 // CHECK2-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
1959 // CHECK2-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1960 // CHECK2:       omp_offload.failed:
1961 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72(i128* [[Y]], i128* [[Z]]) #[[ATTR2]]
1962 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1963 // CHECK2:       omp_offload.cont:
1964 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
1965 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i128**
1966 // CHECK2-NEXT:    store i128* [[Y]], i128** [[TMP20]], align 8
1967 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
1968 // CHECK2-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i128**
1969 // CHECK2-NEXT:    store i128* [[Y]], i128** [[TMP22]], align 8
1970 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
1971 // CHECK2-NEXT:    store i8* null, i8** [[TMP23]], align 8
1972 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
1973 // CHECK2-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i128**
1974 // CHECK2-NEXT:    store i128* [[Z]], i128** [[TMP25]], align 8
1975 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
1976 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i128**
1977 // CHECK2-NEXT:    store i128* [[Z]], i128** [[TMP27]], align 8
1978 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1
1979 // CHECK2-NEXT:    store i8* null, i8** [[TMP28]], align 8
1980 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
1981 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i128**
1982 // CHECK2-NEXT:    store i128* [[X]], i128** [[TMP30]], align 8
1983 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
1984 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i128**
1985 // CHECK2-NEXT:    store i128* [[X]], i128** [[TMP32]], align 8
1986 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2
1987 // CHECK2-NEXT:    store i8* null, i8** [[TMP33]], align 8
1988 // CHECK2-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
1989 // CHECK2-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
1990 // CHECK2-NEXT:    [[TMP36:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74.region_id, i32 3, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1991 // CHECK2-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
1992 // CHECK2-NEXT:    br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]]
1993 // CHECK2:       omp_offload.failed4:
1994 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74(i128* [[Y]], i128* [[Z]]) #[[ATTR2]]
1995 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT5]]
1996 // CHECK2:       omp_offload.cont5:
1997 // CHECK2-NEXT:    ret void
1998 //
1999 //
2000 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72
2001 // CHECK2-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
2002 // CHECK2-NEXT:  entry:
2003 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
2004 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
2005 // CHECK2-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
2006 // CHECK2-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
2007 // CHECK2-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
2008 // CHECK2-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
2009 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
2010 // CHECK2-NEXT:    ret void
2011 //
2012 //
2013 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..24
2014 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
2015 // CHECK2-NEXT:  entry:
2016 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2017 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2018 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
2019 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
2020 // CHECK2-NEXT:    [[Y1:%.*]] = alloca i128, align 16
2021 // CHECK2-NEXT:    [[X:%.*]] = alloca i128, align 16
2022 // CHECK2-NEXT:    [[Z2:%.*]] = alloca i128, align 16
2023 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2024 // CHECK2-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
2025 // CHECK2-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
2026 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i128, align 16
2027 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2028 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2029 // CHECK2-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
2030 // CHECK2-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
2031 // CHECK2-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
2032 // CHECK2-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
2033 // CHECK2-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
2034 // CHECK2-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
2035 // CHECK2-NEXT:    store i128 0, i128* [[Z2]], align 16
2036 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2037 // CHECK2-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
2038 // CHECK2-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
2039 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2040 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
2041 // CHECK2-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2042 // CHECK2-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.25, [8 x i32]* @.gomp_critical_user_.reduction.var)
2043 // CHECK2-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2044 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2045 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2046 // CHECK2-NEXT:    ]
2047 // CHECK2:       .omp.reduction.case1:
2048 // CHECK2-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
2049 // CHECK2-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
2050 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
2051 // CHECK2-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
2052 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2053 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2054 // CHECK2:       .omp.reduction.case2:
2055 // CHECK2-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
2056 // CHECK2-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
2057 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
2058 // CHECK2-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0)
2059 // CHECK2-NEXT:    br label [[ATOMIC_CONT:%.*]]
2060 // CHECK2:       atomic_cont:
2061 // CHECK2-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
2062 // CHECK2-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
2063 // CHECK2-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
2064 // CHECK2-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
2065 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
2066 // CHECK2-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
2067 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
2068 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
2069 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
2070 // CHECK2-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0)
2071 // CHECK2-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
2072 // CHECK2:       atomic_exit:
2073 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2074 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2075 // CHECK2:       .omp.reduction.default:
2076 // CHECK2-NEXT:    ret void
2077 //
2078 //
2079 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.25
2080 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
2081 // CHECK2-NEXT:  entry:
2082 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2083 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2084 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2085 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2086 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2087 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2088 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2089 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2090 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2091 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2092 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
2093 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2094 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2095 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
2096 // CHECK2-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
2097 // CHECK2-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
2098 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
2099 // CHECK2-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
2100 // CHECK2-NEXT:    ret void
2101 //
2102 //
2103 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74
2104 // CHECK2-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
2105 // CHECK2-NEXT:  entry:
2106 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
2107 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
2108 // CHECK2-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
2109 // CHECK2-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
2110 // CHECK2-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
2111 // CHECK2-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
2112 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..28 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
2113 // CHECK2-NEXT:    ret void
2114 //
2115 //
2116 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..28
2117 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
2118 // CHECK2-NEXT:  entry:
2119 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2120 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2121 // CHECK2-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
2122 // CHECK2-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
2123 // CHECK2-NEXT:    [[Y1:%.*]] = alloca i128, align 16
2124 // CHECK2-NEXT:    [[X:%.*]] = alloca i128, align 16
2125 // CHECK2-NEXT:    [[Z2:%.*]] = alloca i128, align 16
2126 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2127 // CHECK2-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
2128 // CHECK2-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
2129 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i128, align 16
2130 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2131 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2132 // CHECK2-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
2133 // CHECK2-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
2134 // CHECK2-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
2135 // CHECK2-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
2136 // CHECK2-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
2137 // CHECK2-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
2138 // CHECK2-NEXT:    store i128 0, i128* [[Z2]], align 16
2139 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2140 // CHECK2-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
2141 // CHECK2-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
2142 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2143 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
2144 // CHECK2-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2145 // CHECK2-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.29, [8 x i32]* @.gomp_critical_user_.reduction.var)
2146 // CHECK2-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2147 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2148 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2149 // CHECK2-NEXT:    ]
2150 // CHECK2:       .omp.reduction.case1:
2151 // CHECK2-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
2152 // CHECK2-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
2153 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
2154 // CHECK2-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
2155 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2156 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2157 // CHECK2:       .omp.reduction.case2:
2158 // CHECK2-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
2159 // CHECK2-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
2160 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
2161 // CHECK2-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0)
2162 // CHECK2-NEXT:    br label [[ATOMIC_CONT:%.*]]
2163 // CHECK2:       atomic_cont:
2164 // CHECK2-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
2165 // CHECK2-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
2166 // CHECK2-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
2167 // CHECK2-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
2168 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
2169 // CHECK2-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
2170 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
2171 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
2172 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
2173 // CHECK2-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0)
2174 // CHECK2-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
2175 // CHECK2:       atomic_exit:
2176 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2177 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2178 // CHECK2:       .omp.reduction.default:
2179 // CHECK2-NEXT:    ret void
2180 //
2181 //
2182 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.29
2183 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
2184 // CHECK2-NEXT:  entry:
2185 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2186 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2187 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2188 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2189 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2190 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2191 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2192 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2193 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2194 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2195 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
2196 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2197 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2198 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
2199 // CHECK2-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
2200 // CHECK2-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
2201 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
2202 // CHECK2-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
2203 // CHECK2-NEXT:    ret void
2204 //
2205 //
2206 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2207 // CHECK2-SAME: () #[[ATTR7:[0-9]+]] {
2208 // CHECK2-NEXT:  entry:
2209 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
2210 // CHECK2-NEXT:    ret void
2211 //
2212 //
2213 // CHECK3-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev
2214 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
2215 // CHECK3-NEXT:  entry:
2216 // CHECK3-NEXT:    [[X:%.*]] = alloca i32, align 4
2217 // CHECK3-NEXT:    [[Y:%.*]] = alloca i32, align 4
2218 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
2219 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
2220 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
2221 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2222 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
2223 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
2224 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2225 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
2226 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
2227 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2228 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2229 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2230 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
2231 // CHECK3-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 4
2232 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2233 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
2234 // CHECK3-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 4
2235 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
2236 // CHECK3-NEXT:    store i8* null, i8** [[TMP9]], align 4
2237 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2238 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2239 // CHECK3-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2240 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2241 // CHECK3-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2242 // CHECK3:       omp_offload.failed:
2243 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]]
2244 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2245 // CHECK3:       omp_offload.cont:
2246 // CHECK3-NEXT:    ret void
2247 //
2248 //
2249 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
2250 // CHECK3-SAME: () #[[ATTR1:[0-9]+]] {
2251 // CHECK3-NEXT:  entry:
2252 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2253 // CHECK3-NEXT:    ret void
2254 //
2255 //
2256 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
2257 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
2258 // CHECK3-NEXT:  entry:
2259 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2260 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2261 // CHECK3-NEXT:    [[X:%.*]] = alloca i32, align 4
2262 // CHECK3-NEXT:    [[Y:%.*]] = alloca i32, align 4
2263 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2264 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2265 // CHECK3-NEXT:    ret void
2266 //
2267 //
2268 // CHECK3-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev
2269 // CHECK3-SAME: () #[[ATTR0]] {
2270 // CHECK3-NEXT:  entry:
2271 // CHECK3-NEXT:    [[X:%.*]] = alloca i32, align 4
2272 // CHECK3-NEXT:    [[Y:%.*]] = alloca i32, align 4
2273 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
2274 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
2275 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
2276 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2277 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
2278 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
2279 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2280 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
2281 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
2282 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2283 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2284 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2285 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
2286 // CHECK3-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 4
2287 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2288 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
2289 // CHECK3-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 4
2290 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
2291 // CHECK3-NEXT:    store i8* null, i8** [[TMP9]], align 4
2292 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2293 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2294 // CHECK3-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2295 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2296 // CHECK3-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2297 // CHECK3:       omp_offload.failed:
2298 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
2299 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2300 // CHECK3:       omp_offload.cont:
2301 // CHECK3-NEXT:    ret void
2302 //
2303 //
2304 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
2305 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
2306 // CHECK3-NEXT:  entry:
2307 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
2308 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
2309 // CHECK3-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
2310 // CHECK3-NEXT:    [[Y_CASTED:%.*]] = alloca i32, align 4
2311 // CHECK3-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
2312 // CHECK3-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
2313 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
2314 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
2315 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
2316 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[X_CASTED]], align 4
2317 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4
2318 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
2319 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[Y_CASTED]], align 4
2320 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4
2321 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]])
2322 // CHECK3-NEXT:    ret void
2323 //
2324 //
2325 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
2326 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR1]] {
2327 // CHECK3-NEXT:  entry:
2328 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2329 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2330 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
2331 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca i32, align 4
2332 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2333 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2334 // CHECK3-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
2335 // CHECK3-NEXT:    store i32 [[Y]], i32* [[Y_ADDR]], align 4
2336 // CHECK3-NEXT:    ret void
2337 //
2338 //
2339 // CHECK3-LABEL: define {{[^@]+}}@_Z16mapWithReductionv
2340 // CHECK3-SAME: () #[[ATTR0]] {
2341 // CHECK3-NEXT:  entry:
2342 // CHECK3-NEXT:    [[X:%.*]] = alloca i32, align 4
2343 // CHECK3-NEXT:    [[Y:%.*]] = alloca i32, align 4
2344 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
2345 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
2346 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
2347 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2348 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
2349 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
2350 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2351 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
2352 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
2353 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2354 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2355 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2356 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
2357 // CHECK3-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 4
2358 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2359 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
2360 // CHECK3-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 4
2361 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
2362 // CHECK3-NEXT:    store i8* null, i8** [[TMP9]], align 4
2363 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2364 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2365 // CHECK3-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2366 // CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2367 // CHECK3-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2368 // CHECK3:       omp_offload.failed:
2369 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
2370 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2371 // CHECK3:       omp_offload.cont:
2372 // CHECK3-NEXT:    ret void
2373 //
2374 //
2375 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
2376 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
2377 // CHECK3-NEXT:  entry:
2378 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
2379 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
2380 // CHECK3-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
2381 // CHECK3-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
2382 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
2383 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
2384 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
2385 // CHECK3-NEXT:    ret void
2386 //
2387 //
2388 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
2389 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
2390 // CHECK3-NEXT:  entry:
2391 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2392 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2393 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
2394 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
2395 // CHECK3-NEXT:    [[X1:%.*]] = alloca i32, align 4
2396 // CHECK3-NEXT:    [[Y2:%.*]] = alloca i32, align 4
2397 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
2398 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2399 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2400 // CHECK3-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
2401 // CHECK3-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
2402 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
2403 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
2404 // CHECK3-NEXT:    store i32 0, i32* [[X1]], align 4
2405 // CHECK3-NEXT:    store i32 0, i32* [[Y2]], align 4
2406 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
2407 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
2408 // CHECK3-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 4
2409 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
2410 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
2411 // CHECK3-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 4
2412 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2413 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
2414 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2415 // CHECK3-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
2416 // CHECK3-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2417 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2418 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2419 // CHECK3-NEXT:    ]
2420 // CHECK3:       .omp.reduction.case1:
2421 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
2422 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
2423 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2424 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
2425 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
2426 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
2427 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2428 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
2429 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2430 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2431 // CHECK3:       .omp.reduction.case2:
2432 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
2433 // CHECK3-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
2434 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
2435 // CHECK3-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
2436 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2437 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2438 // CHECK3:       .omp.reduction.default:
2439 // CHECK3-NEXT:    ret void
2440 //
2441 //
2442 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
2443 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
2444 // CHECK3-NEXT:  entry:
2445 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
2446 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
2447 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2448 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
2449 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2450 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
2451 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
2452 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
2453 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
2454 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2455 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2456 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0
2457 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
2458 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2459 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
2460 // CHECK3-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
2461 // CHECK3-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
2462 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1
2463 // CHECK3-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
2464 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
2465 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
2466 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
2467 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
2468 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
2469 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
2470 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
2471 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2472 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
2473 // CHECK3-NEXT:    ret void
2474 //
2475 //
2476 // CHECK3-LABEL: define {{[^@]+}}@_Z7mapFromv
2477 // CHECK3-SAME: () #[[ATTR0]] {
2478 // CHECK3-NEXT:  entry:
2479 // CHECK3-NEXT:    [[X:%.*]] = alloca i32, align 4
2480 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
2481 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
2482 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
2483 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2484 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
2485 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
2486 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2487 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
2488 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
2489 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2490 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2491 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2492 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2493 // CHECK3-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2494 // CHECK3-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
2495 // CHECK3-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2496 // CHECK3:       omp_offload.failed:
2497 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]]
2498 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2499 // CHECK3:       omp_offload.cont:
2500 // CHECK3-NEXT:    ret void
2501 //
2502 //
2503 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
2504 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
2505 // CHECK3-NEXT:  entry:
2506 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
2507 // CHECK3-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
2508 // CHECK3-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
2509 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
2510 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2511 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
2512 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
2513 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP2]])
2514 // CHECK3-NEXT:    ret void
2515 //
2516 //
2517 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
2518 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR1]] {
2519 // CHECK3-NEXT:  entry:
2520 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2521 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2522 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
2523 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2524 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2525 // CHECK3-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
2526 // CHECK3-NEXT:    ret void
2527 //
2528 //
2529 // CHECK3-LABEL: define {{[^@]+}}@_Z5mapTov
2530 // CHECK3-SAME: () #[[ATTR0]] {
2531 // CHECK3-NEXT:  entry:
2532 // CHECK3-NEXT:    [[X:%.*]] = alloca i32, align 4
2533 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
2534 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
2535 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
2536 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2537 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
2538 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
2539 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2540 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
2541 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
2542 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2543 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2544 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2545 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2546 // CHECK3-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2547 // CHECK3-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
2548 // CHECK3-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2549 // CHECK3:       omp_offload.failed:
2550 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]]
2551 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2552 // CHECK3:       omp_offload.cont:
2553 // CHECK3-NEXT:    ret void
2554 //
2555 //
2556 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
2557 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
2558 // CHECK3-NEXT:  entry:
2559 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
2560 // CHECK3-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
2561 // CHECK3-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
2562 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
2563 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2564 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
2565 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
2566 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP2]])
2567 // CHECK3-NEXT:    ret void
2568 //
2569 //
2570 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
2571 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR1]] {
2572 // CHECK3-NEXT:  entry:
2573 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2574 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2575 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
2576 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2577 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2578 // CHECK3-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
2579 // CHECK3-NEXT:    ret void
2580 //
2581 //
2582 // CHECK3-LABEL: define {{[^@]+}}@_Z8mapAllocv
2583 // CHECK3-SAME: () #[[ATTR0]] {
2584 // CHECK3-NEXT:  entry:
2585 // CHECK3-NEXT:    [[X:%.*]] = alloca i32, align 4
2586 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
2587 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
2588 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
2589 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2590 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
2591 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
2592 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2593 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
2594 // CHECK3-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
2595 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2596 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2597 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2598 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2599 // CHECK3-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2600 // CHECK3-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
2601 // CHECK3-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2602 // CHECK3:       omp_offload.failed:
2603 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]]
2604 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2605 // CHECK3:       omp_offload.cont:
2606 // CHECK3-NEXT:    ret void
2607 //
2608 //
2609 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
2610 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
2611 // CHECK3-NEXT:  entry:
2612 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
2613 // CHECK3-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
2614 // CHECK3-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
2615 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
2616 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2617 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
2618 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
2619 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]])
2620 // CHECK3-NEXT:    ret void
2621 //
2622 //
2623 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13
2624 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR1]] {
2625 // CHECK3-NEXT:  entry:
2626 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2627 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2628 // CHECK3-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
2629 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2630 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2631 // CHECK3-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
2632 // CHECK3-NEXT:    ret void
2633 //
2634 //
2635 // CHECK3-LABEL: define {{[^@]+}}@_Z8mapArrayv
2636 // CHECK3-SAME: () #[[ATTR0]] {
2637 // CHECK3-NEXT:  entry:
2638 // CHECK3-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
2639 // CHECK3-NEXT:    [[Y:%.*]] = alloca [88 x i32], align 4
2640 // CHECK3-NEXT:    [[Z:%.*]] = alloca [99 x i32], align 4
2641 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
2642 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
2643 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
2644 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 4
2645 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 4
2646 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 4
2647 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2648 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]**
2649 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 4
2650 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2651 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]**
2652 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 4
2653 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2654 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
2655 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2656 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]**
2657 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 4
2658 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2659 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]**
2660 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 4
2661 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
2662 // CHECK3-NEXT:    store i8* null, i8** [[TMP9]], align 4
2663 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
2664 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]**
2665 // CHECK3-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 4
2666 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
2667 // CHECK3-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to [77 x i32]**
2668 // CHECK3-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 4
2669 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
2670 // CHECK3-NEXT:    store i8* null, i8** [[TMP14]], align 4
2671 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2672 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2673 // CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, i32 3, i8** [[TMP15]], i8** [[TMP16]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2674 // CHECK3-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
2675 // CHECK3-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2676 // CHECK3:       omp_offload.failed:
2677 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
2678 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2679 // CHECK3:       omp_offload.cont:
2680 // CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
2681 // CHECK3-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [88 x i32]**
2682 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP20]], align 4
2683 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
2684 // CHECK3-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [88 x i32]**
2685 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP22]], align 4
2686 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
2687 // CHECK3-NEXT:    store i8* null, i8** [[TMP23]], align 4
2688 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
2689 // CHECK3-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [99 x i32]**
2690 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP25]], align 4
2691 // CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
2692 // CHECK3-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to [99 x i32]**
2693 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP27]], align 4
2694 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 1
2695 // CHECK3-NEXT:    store i8* null, i8** [[TMP28]], align 4
2696 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
2697 // CHECK3-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [77 x i32]**
2698 // CHECK3-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP30]], align 4
2699 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
2700 // CHECK3-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [77 x i32]**
2701 // CHECK3-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP32]], align 4
2702 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 2
2703 // CHECK3-NEXT:    store i8* null, i8** [[TMP33]], align 4
2704 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
2705 // CHECK3-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
2706 // CHECK3-NEXT:    [[TMP36:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, i32 3, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2707 // CHECK3-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
2708 // CHECK3-NEXT:    br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]]
2709 // CHECK3:       omp_offload.failed4:
2710 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
2711 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT5]]
2712 // CHECK3:       omp_offload.cont5:
2713 // CHECK3-NEXT:    ret void
2714 //
2715 //
2716 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
2717 // CHECK3-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
2718 // CHECK3-NEXT:  entry:
2719 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
2720 // CHECK3-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
2721 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
2722 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
2723 // CHECK3-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
2724 // CHECK3-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
2725 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
2726 // CHECK3-NEXT:    ret void
2727 //
2728 //
2729 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16
2730 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
2731 // CHECK3-NEXT:  entry:
2732 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2733 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2734 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
2735 // CHECK3-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
2736 // CHECK3-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
2737 // CHECK3-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
2738 // CHECK3-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
2739 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
2740 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2741 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2742 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
2743 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
2744 // CHECK3-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
2745 // CHECK3-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
2746 // CHECK3-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
2747 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
2748 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
2749 // CHECK3-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
2750 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
2751 // CHECK3-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
2752 // CHECK3-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
2753 // CHECK3:       omp.arrayinit.body:
2754 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
2755 // CHECK3-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
2756 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2757 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
2758 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
2759 // CHECK3:       omp.arrayinit.done:
2760 // CHECK3-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
2761 // CHECK3-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
2762 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
2763 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
2764 // CHECK3-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
2765 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2766 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
2767 // CHECK3-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2768 // CHECK3-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var)
2769 // CHECK3-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2770 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2771 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2772 // CHECK3-NEXT:    ]
2773 // CHECK3:       .omp.reduction.case1:
2774 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
2775 // CHECK3-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
2776 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2777 // CHECK3:       omp.arraycpy.body:
2778 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2779 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2780 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
2781 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
2782 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2783 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
2784 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
2785 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2786 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
2787 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
2788 // CHECK3:       omp.arraycpy.done6:
2789 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2790 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2791 // CHECK3:       .omp.reduction.case2:
2792 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
2793 // CHECK3-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
2794 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
2795 // CHECK3:       omp.arraycpy.body8:
2796 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
2797 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
2798 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
2799 // CHECK3-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
2800 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
2801 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
2802 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
2803 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
2804 // CHECK3:       omp.arraycpy.done14:
2805 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2806 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2807 // CHECK3:       .omp.reduction.default:
2808 // CHECK3-NEXT:    ret void
2809 //
2810 //
2811 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17
2812 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
2813 // CHECK3-NEXT:  entry:
2814 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
2815 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
2816 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2817 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
2818 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2819 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2820 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
2821 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2822 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
2823 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2824 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2825 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
2826 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
2827 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2828 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
2829 // CHECK3-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
2830 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2831 // CHECK3:       omp.arraycpy.body:
2832 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2833 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2834 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
2835 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
2836 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
2837 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
2838 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2839 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2840 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
2841 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
2842 // CHECK3:       omp.arraycpy.done2:
2843 // CHECK3-NEXT:    ret void
2844 //
2845 //
2846 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
2847 // CHECK3-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
2848 // CHECK3-NEXT:  entry:
2849 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
2850 // CHECK3-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
2851 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
2852 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
2853 // CHECK3-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
2854 // CHECK3-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
2855 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
2856 // CHECK3-NEXT:    ret void
2857 //
2858 //
2859 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..20
2860 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
2861 // CHECK3-NEXT:  entry:
2862 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2863 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2864 // CHECK3-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
2865 // CHECK3-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
2866 // CHECK3-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
2867 // CHECK3-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
2868 // CHECK3-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
2869 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
2870 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2871 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2872 // CHECK3-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
2873 // CHECK3-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
2874 // CHECK3-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
2875 // CHECK3-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
2876 // CHECK3-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
2877 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
2878 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
2879 // CHECK3-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
2880 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
2881 // CHECK3-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
2882 // CHECK3-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
2883 // CHECK3:       omp.arrayinit.body:
2884 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
2885 // CHECK3-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
2886 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2887 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
2888 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
2889 // CHECK3:       omp.arrayinit.done:
2890 // CHECK3-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
2891 // CHECK3-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
2892 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
2893 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
2894 // CHECK3-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
2895 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2896 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
2897 // CHECK3-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2898 // CHECK3-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var)
2899 // CHECK3-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2900 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2901 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2902 // CHECK3-NEXT:    ]
2903 // CHECK3:       .omp.reduction.case1:
2904 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
2905 // CHECK3-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
2906 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2907 // CHECK3:       omp.arraycpy.body:
2908 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2909 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2910 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
2911 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
2912 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2913 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
2914 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
2915 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2916 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
2917 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
2918 // CHECK3:       omp.arraycpy.done6:
2919 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2920 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2921 // CHECK3:       .omp.reduction.case2:
2922 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
2923 // CHECK3-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
2924 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
2925 // CHECK3:       omp.arraycpy.body8:
2926 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
2927 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
2928 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
2929 // CHECK3-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
2930 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
2931 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
2932 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
2933 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
2934 // CHECK3:       omp.arraycpy.done14:
2935 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2936 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2937 // CHECK3:       .omp.reduction.default:
2938 // CHECK3-NEXT:    ret void
2939 //
2940 //
2941 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21
2942 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
2943 // CHECK3-NEXT:  entry:
2944 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
2945 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
2946 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2947 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
2948 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2949 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2950 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
2951 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2952 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
2953 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2954 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
2955 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
2956 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
2957 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
2958 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
2959 // CHECK3-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
2960 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2961 // CHECK3:       omp.arraycpy.body:
2962 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2963 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2964 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
2965 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
2966 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
2967 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
2968 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2969 // CHECK3-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2970 // CHECK3-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
2971 // CHECK3-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
2972 // CHECK3:       omp.arraycpy.done2:
2973 // CHECK3-NEXT:    ret void
2974 //
2975 //
2976 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2977 // CHECK3-SAME: () #[[ATTR6:[0-9]+]] {
2978 // CHECK3-NEXT:  entry:
2979 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
2980 // CHECK3-NEXT:    ret void
2981 //
2982 //
2983 // CHECK4-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev
2984 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
2985 // CHECK4-NEXT:  entry:
2986 // CHECK4-NEXT:    [[X:%.*]] = alloca i32, align 4
2987 // CHECK4-NEXT:    [[Y:%.*]] = alloca i32, align 4
2988 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
2989 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
2990 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
2991 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2992 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
2993 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
2994 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2995 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
2996 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
2997 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2998 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
2999 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3000 // CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
3001 // CHECK4-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 4
3002 // CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3003 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
3004 // CHECK4-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 4
3005 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
3006 // CHECK4-NEXT:    store i8* null, i8** [[TMP9]], align 4
3007 // CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3008 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3009 // CHECK4-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3010 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3011 // CHECK4-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3012 // CHECK4:       omp_offload.failed:
3013 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]]
3014 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3015 // CHECK4:       omp_offload.cont:
3016 // CHECK4-NEXT:    ret void
3017 //
3018 //
3019 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
3020 // CHECK4-SAME: () #[[ATTR1:[0-9]+]] {
3021 // CHECK4-NEXT:  entry:
3022 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
3023 // CHECK4-NEXT:    ret void
3024 //
3025 //
3026 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
3027 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
3028 // CHECK4-NEXT:  entry:
3029 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3030 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3031 // CHECK4-NEXT:    [[X:%.*]] = alloca i32, align 4
3032 // CHECK4-NEXT:    [[Y:%.*]] = alloca i32, align 4
3033 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3034 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3035 // CHECK4-NEXT:    ret void
3036 //
3037 //
3038 // CHECK4-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev
3039 // CHECK4-SAME: () #[[ATTR0]] {
3040 // CHECK4-NEXT:  entry:
3041 // CHECK4-NEXT:    [[X:%.*]] = alloca i32, align 4
3042 // CHECK4-NEXT:    [[Y:%.*]] = alloca i32, align 4
3043 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
3044 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
3045 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
3046 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3047 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
3048 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
3049 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3050 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
3051 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
3052 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3053 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
3054 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3055 // CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
3056 // CHECK4-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 4
3057 // CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3058 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
3059 // CHECK4-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 4
3060 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
3061 // CHECK4-NEXT:    store i8* null, i8** [[TMP9]], align 4
3062 // CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3063 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3064 // CHECK4-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3065 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3066 // CHECK4-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3067 // CHECK4:       omp_offload.failed:
3068 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
3069 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3070 // CHECK4:       omp_offload.cont:
3071 // CHECK4-NEXT:    ret void
3072 //
3073 //
3074 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
3075 // CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
3076 // CHECK4-NEXT:  entry:
3077 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
3078 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
3079 // CHECK4-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
3080 // CHECK4-NEXT:    [[Y_CASTED:%.*]] = alloca i32, align 4
3081 // CHECK4-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
3082 // CHECK4-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
3083 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
3084 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
3085 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
3086 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[X_CASTED]], align 4
3087 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4
3088 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
3089 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[Y_CASTED]], align 4
3090 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4
3091 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]])
3092 // CHECK4-NEXT:    ret void
3093 //
3094 //
3095 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
3096 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR1]] {
3097 // CHECK4-NEXT:  entry:
3098 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3099 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3100 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
3101 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca i32, align 4
3102 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3103 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3104 // CHECK4-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
3105 // CHECK4-NEXT:    store i32 [[Y]], i32* [[Y_ADDR]], align 4
3106 // CHECK4-NEXT:    ret void
3107 //
3108 //
3109 // CHECK4-LABEL: define {{[^@]+}}@_Z16mapWithReductionv
3110 // CHECK4-SAME: () #[[ATTR0]] {
3111 // CHECK4-NEXT:  entry:
3112 // CHECK4-NEXT:    [[X:%.*]] = alloca i32, align 4
3113 // CHECK4-NEXT:    [[Y:%.*]] = alloca i32, align 4
3114 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
3115 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
3116 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
3117 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3118 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
3119 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
3120 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3121 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
3122 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
3123 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3124 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
3125 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3126 // CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
3127 // CHECK4-NEXT:    store i32* [[Y]], i32** [[TMP6]], align 4
3128 // CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3129 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
3130 // CHECK4-NEXT:    store i32* [[Y]], i32** [[TMP8]], align 4
3131 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
3132 // CHECK4-NEXT:    store i8* null, i8** [[TMP9]], align 4
3133 // CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3134 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3135 // CHECK4-NEXT:    [[TMP12:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, i32 2, i8** [[TMP10]], i8** [[TMP11]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3136 // CHECK4-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3137 // CHECK4-NEXT:    br i1 [[TMP13]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3138 // CHECK4:       omp_offload.failed:
3139 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
3140 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3141 // CHECK4:       omp_offload.cont:
3142 // CHECK4-NEXT:    ret void
3143 //
3144 //
3145 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
3146 // CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
3147 // CHECK4-NEXT:  entry:
3148 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
3149 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
3150 // CHECK4-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
3151 // CHECK4-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
3152 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
3153 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
3154 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
3155 // CHECK4-NEXT:    ret void
3156 //
3157 //
3158 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
3159 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
3160 // CHECK4-NEXT:  entry:
3161 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3162 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3163 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
3164 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
3165 // CHECK4-NEXT:    [[X1:%.*]] = alloca i32, align 4
3166 // CHECK4-NEXT:    [[Y2:%.*]] = alloca i32, align 4
3167 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
3168 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3169 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3170 // CHECK4-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
3171 // CHECK4-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
3172 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
3173 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
3174 // CHECK4-NEXT:    store i32 0, i32* [[X1]], align 4
3175 // CHECK4-NEXT:    store i32 0, i32* [[Y2]], align 4
3176 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
3177 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
3178 // CHECK4-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 4
3179 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
3180 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
3181 // CHECK4-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 4
3182 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3183 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
3184 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
3185 // CHECK4-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
3186 // CHECK4-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
3187 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
3188 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
3189 // CHECK4-NEXT:    ]
3190 // CHECK4:       .omp.reduction.case1:
3191 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
3192 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
3193 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3194 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
3195 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
3196 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
3197 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
3198 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
3199 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3200 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3201 // CHECK4:       .omp.reduction.case2:
3202 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
3203 // CHECK4-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
3204 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
3205 // CHECK4-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
3206 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3207 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3208 // CHECK4:       .omp.reduction.default:
3209 // CHECK4-NEXT:    ret void
3210 //
3211 //
3212 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
3213 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
3214 // CHECK4-NEXT:  entry:
3215 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
3216 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
3217 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
3218 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
3219 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
3220 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
3221 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
3222 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
3223 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
3224 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
3225 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
3226 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0
3227 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
3228 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
3229 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
3230 // CHECK4-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
3231 // CHECK4-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
3232 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1
3233 // CHECK4-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
3234 // CHECK4-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
3235 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
3236 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
3237 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
3238 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
3239 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
3240 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
3241 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
3242 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
3243 // CHECK4-NEXT:    ret void
3244 //
3245 //
3246 // CHECK4-LABEL: define {{[^@]+}}@_Z7mapFromv
3247 // CHECK4-SAME: () #[[ATTR0]] {
3248 // CHECK4-NEXT:  entry:
3249 // CHECK4-NEXT:    [[X:%.*]] = alloca i32, align 4
3250 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
3251 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
3252 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
3253 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3254 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
3255 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
3256 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3257 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
3258 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
3259 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3260 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
3261 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3262 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3263 // CHECK4-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3264 // CHECK4-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
3265 // CHECK4-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3266 // CHECK4:       omp_offload.failed:
3267 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]]
3268 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3269 // CHECK4:       omp_offload.cont:
3270 // CHECK4-NEXT:    ret void
3271 //
3272 //
3273 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
3274 // CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
3275 // CHECK4-NEXT:  entry:
3276 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
3277 // CHECK4-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
3278 // CHECK4-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
3279 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
3280 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
3281 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
3282 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
3283 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP2]])
3284 // CHECK4-NEXT:    ret void
3285 //
3286 //
3287 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
3288 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR1]] {
3289 // CHECK4-NEXT:  entry:
3290 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3291 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3292 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
3293 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3294 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3295 // CHECK4-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
3296 // CHECK4-NEXT:    ret void
3297 //
3298 //
3299 // CHECK4-LABEL: define {{[^@]+}}@_Z5mapTov
3300 // CHECK4-SAME: () #[[ATTR0]] {
3301 // CHECK4-NEXT:  entry:
3302 // CHECK4-NEXT:    [[X:%.*]] = alloca i32, align 4
3303 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
3304 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
3305 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
3306 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3307 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
3308 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
3309 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3310 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
3311 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
3312 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3313 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
3314 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3315 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3316 // CHECK4-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3317 // CHECK4-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
3318 // CHECK4-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3319 // CHECK4:       omp_offload.failed:
3320 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]]
3321 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3322 // CHECK4:       omp_offload.cont:
3323 // CHECK4-NEXT:    ret void
3324 //
3325 //
3326 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
3327 // CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
3328 // CHECK4-NEXT:  entry:
3329 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
3330 // CHECK4-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
3331 // CHECK4-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
3332 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
3333 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
3334 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
3335 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
3336 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP2]])
3337 // CHECK4-NEXT:    ret void
3338 //
3339 //
3340 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10
3341 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR1]] {
3342 // CHECK4-NEXT:  entry:
3343 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3344 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3345 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
3346 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3347 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3348 // CHECK4-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
3349 // CHECK4-NEXT:    ret void
3350 //
3351 //
3352 // CHECK4-LABEL: define {{[^@]+}}@_Z8mapAllocv
3353 // CHECK4-SAME: () #[[ATTR0]] {
3354 // CHECK4-NEXT:  entry:
3355 // CHECK4-NEXT:    [[X:%.*]] = alloca i32, align 4
3356 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
3357 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
3358 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
3359 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3360 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
3361 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP1]], align 4
3362 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3363 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
3364 // CHECK4-NEXT:    store i32* [[X]], i32** [[TMP3]], align 4
3365 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3366 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
3367 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3368 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3369 // CHECK4-NEXT:    [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3370 // CHECK4-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
3371 // CHECK4-NEXT:    br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3372 // CHECK4:       omp_offload.failed:
3373 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]]
3374 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3375 // CHECK4:       omp_offload.cont:
3376 // CHECK4-NEXT:    ret void
3377 //
3378 //
3379 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
3380 // CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
3381 // CHECK4-NEXT:  entry:
3382 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
3383 // CHECK4-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
3384 // CHECK4-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
3385 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
3386 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
3387 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
3388 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
3389 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]])
3390 // CHECK4-NEXT:    ret void
3391 //
3392 //
3393 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13
3394 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR1]] {
3395 // CHECK4-NEXT:  entry:
3396 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3397 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3398 // CHECK4-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
3399 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3400 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3401 // CHECK4-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
3402 // CHECK4-NEXT:    ret void
3403 //
3404 //
3405 // CHECK4-LABEL: define {{[^@]+}}@_Z8mapArrayv
3406 // CHECK4-SAME: () #[[ATTR0]] {
3407 // CHECK4-NEXT:  entry:
3408 // CHECK4-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
3409 // CHECK4-NEXT:    [[Y:%.*]] = alloca [88 x i32], align 4
3410 // CHECK4-NEXT:    [[Z:%.*]] = alloca [99 x i32], align 4
3411 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
3412 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
3413 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
3414 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 4
3415 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 4
3416 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 4
3417 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3418 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]**
3419 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 4
3420 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3421 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]**
3422 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 4
3423 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3424 // CHECK4-NEXT:    store i8* null, i8** [[TMP4]], align 4
3425 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3426 // CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]**
3427 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 4
3428 // CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3429 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]**
3430 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 4
3431 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
3432 // CHECK4-NEXT:    store i8* null, i8** [[TMP9]], align 4
3433 // CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3434 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]**
3435 // CHECK4-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 4
3436 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3437 // CHECK4-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to [77 x i32]**
3438 // CHECK4-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 4
3439 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
3440 // CHECK4-NEXT:    store i8* null, i8** [[TMP14]], align 4
3441 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3442 // CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3443 // CHECK4-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, i32 3, i8** [[TMP15]], i8** [[TMP16]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3444 // CHECK4-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
3445 // CHECK4-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3446 // CHECK4:       omp_offload.failed:
3447 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
3448 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3449 // CHECK4:       omp_offload.cont:
3450 // CHECK4-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
3451 // CHECK4-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [88 x i32]**
3452 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP20]], align 4
3453 // CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
3454 // CHECK4-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [88 x i32]**
3455 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[TMP22]], align 4
3456 // CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
3457 // CHECK4-NEXT:    store i8* null, i8** [[TMP23]], align 4
3458 // CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
3459 // CHECK4-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [99 x i32]**
3460 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP25]], align 4
3461 // CHECK4-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
3462 // CHECK4-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to [99 x i32]**
3463 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[TMP27]], align 4
3464 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 1
3465 // CHECK4-NEXT:    store i8* null, i8** [[TMP28]], align 4
3466 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
3467 // CHECK4-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [77 x i32]**
3468 // CHECK4-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP30]], align 4
3469 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
3470 // CHECK4-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [77 x i32]**
3471 // CHECK4-NEXT:    store [77 x i32]* [[X]], [77 x i32]** [[TMP32]], align 4
3472 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 2
3473 // CHECK4-NEXT:    store i8* null, i8** [[TMP33]], align 4
3474 // CHECK4-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
3475 // CHECK4-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
3476 // CHECK4-NEXT:    [[TMP36:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, i32 3, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3477 // CHECK4-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
3478 // CHECK4-NEXT:    br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]]
3479 // CHECK4:       omp_offload.failed4:
3480 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
3481 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT5]]
3482 // CHECK4:       omp_offload.cont5:
3483 // CHECK4-NEXT:    ret void
3484 //
3485 //
3486 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
3487 // CHECK4-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
3488 // CHECK4-NEXT:  entry:
3489 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
3490 // CHECK4-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
3491 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
3492 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
3493 // CHECK4-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
3494 // CHECK4-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
3495 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
3496 // CHECK4-NEXT:    ret void
3497 //
3498 //
3499 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16
3500 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
3501 // CHECK4-NEXT:  entry:
3502 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3503 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3504 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
3505 // CHECK4-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
3506 // CHECK4-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
3507 // CHECK4-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
3508 // CHECK4-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
3509 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
3510 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3511 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3512 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
3513 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
3514 // CHECK4-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
3515 // CHECK4-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
3516 // CHECK4-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
3517 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
3518 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
3519 // CHECK4-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
3520 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
3521 // CHECK4-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
3522 // CHECK4-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
3523 // CHECK4:       omp.arrayinit.body:
3524 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
3525 // CHECK4-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
3526 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3527 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
3528 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
3529 // CHECK4:       omp.arrayinit.done:
3530 // CHECK4-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
3531 // CHECK4-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
3532 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
3533 // CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
3534 // CHECK4-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
3535 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3536 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
3537 // CHECK4-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
3538 // CHECK4-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var)
3539 // CHECK4-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
3540 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
3541 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
3542 // CHECK4-NEXT:    ]
3543 // CHECK4:       .omp.reduction.case1:
3544 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
3545 // CHECK4-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
3546 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3547 // CHECK4:       omp.arraycpy.body:
3548 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3549 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3550 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
3551 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
3552 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
3553 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
3554 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
3555 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3556 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
3557 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
3558 // CHECK4:       omp.arraycpy.done6:
3559 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3560 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3561 // CHECK4:       .omp.reduction.case2:
3562 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
3563 // CHECK4-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
3564 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
3565 // CHECK4:       omp.arraycpy.body8:
3566 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
3567 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
3568 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
3569 // CHECK4-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
3570 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
3571 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
3572 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
3573 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
3574 // CHECK4:       omp.arraycpy.done14:
3575 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3576 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3577 // CHECK4:       .omp.reduction.default:
3578 // CHECK4-NEXT:    ret void
3579 //
3580 //
3581 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17
3582 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
3583 // CHECK4-NEXT:  entry:
3584 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
3585 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
3586 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
3587 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
3588 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
3589 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
3590 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
3591 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
3592 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
3593 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
3594 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
3595 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
3596 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
3597 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
3598 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
3599 // CHECK4-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
3600 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3601 // CHECK4:       omp.arraycpy.body:
3602 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3603 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3604 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
3605 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
3606 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
3607 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
3608 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3609 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3610 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
3611 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
3612 // CHECK4:       omp.arraycpy.done2:
3613 // CHECK4-NEXT:    ret void
3614 //
3615 //
3616 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
3617 // CHECK4-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
3618 // CHECK4-NEXT:  entry:
3619 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
3620 // CHECK4-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
3621 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
3622 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
3623 // CHECK4-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
3624 // CHECK4-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
3625 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
3626 // CHECK4-NEXT:    ret void
3627 //
3628 //
3629 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..20
3630 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
3631 // CHECK4-NEXT:  entry:
3632 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3633 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3634 // CHECK4-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
3635 // CHECK4-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
3636 // CHECK4-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
3637 // CHECK4-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
3638 // CHECK4-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
3639 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
3640 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3641 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3642 // CHECK4-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
3643 // CHECK4-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
3644 // CHECK4-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
3645 // CHECK4-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
3646 // CHECK4-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
3647 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
3648 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
3649 // CHECK4-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
3650 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
3651 // CHECK4-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
3652 // CHECK4-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
3653 // CHECK4:       omp.arrayinit.body:
3654 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
3655 // CHECK4-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
3656 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3657 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
3658 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
3659 // CHECK4:       omp.arrayinit.done:
3660 // CHECK4-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
3661 // CHECK4-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
3662 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
3663 // CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
3664 // CHECK4-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
3665 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3666 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
3667 // CHECK4-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
3668 // CHECK4-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var)
3669 // CHECK4-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
3670 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
3671 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
3672 // CHECK4-NEXT:    ]
3673 // CHECK4:       .omp.reduction.case1:
3674 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
3675 // CHECK4-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
3676 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3677 // CHECK4:       omp.arraycpy.body:
3678 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3679 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3680 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
3681 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
3682 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
3683 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
3684 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
3685 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3686 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
3687 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
3688 // CHECK4:       omp.arraycpy.done6:
3689 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3690 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3691 // CHECK4:       .omp.reduction.case2:
3692 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
3693 // CHECK4-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
3694 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
3695 // CHECK4:       omp.arraycpy.body8:
3696 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
3697 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
3698 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
3699 // CHECK4-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
3700 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
3701 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
3702 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
3703 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
3704 // CHECK4:       omp.arraycpy.done14:
3705 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3706 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3707 // CHECK4:       .omp.reduction.default:
3708 // CHECK4-NEXT:    ret void
3709 //
3710 //
3711 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21
3712 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
3713 // CHECK4-NEXT:  entry:
3714 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
3715 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
3716 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
3717 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
3718 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
3719 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
3720 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
3721 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
3722 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
3723 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
3724 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
3725 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
3726 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
3727 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
3728 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
3729 // CHECK4-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
3730 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3731 // CHECK4:       omp.arraycpy.body:
3732 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3733 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3734 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
3735 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
3736 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
3737 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
3738 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3739 // CHECK4-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3740 // CHECK4-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
3741 // CHECK4-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
3742 // CHECK4:       omp.arraycpy.done2:
3743 // CHECK4-NEXT:    ret void
3744 //
3745 //
3746 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
3747 // CHECK4-SAME: () #[[ATTR6:[0-9]+]] {
3748 // CHECK4-NEXT:  entry:
3749 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
3750 // CHECK4-NEXT:    ret void
3751 //
3752 //
3753 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
3754 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
3755 // CHECK5-NEXT:  entry:
3756 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
3757 // CHECK5-NEXT:    ret void
3758 //
3759 //
3760 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
3761 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3762 // CHECK5-NEXT:  entry:
3763 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3764 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3765 // CHECK5-NEXT:    [[X:%.*]] = alloca i32, align 4
3766 // CHECK5-NEXT:    [[Y:%.*]] = alloca i32, align 4
3767 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3768 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3769 // CHECK5-NEXT:    ret void
3770 //
3771 //
3772 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
3773 // CHECK5-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
3774 // CHECK5-NEXT:  entry:
3775 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
3776 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
3777 // CHECK5-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
3778 // CHECK5-NEXT:    [[Y_CASTED:%.*]] = alloca i64, align 8
3779 // CHECK5-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
3780 // CHECK5-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
3781 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
3782 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
3783 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
3784 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
3785 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
3786 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8
3787 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
3788 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
3789 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[CONV1]], align 4
3790 // CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8
3791 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]])
3792 // CHECK5-NEXT:    ret void
3793 //
3794 //
3795 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
3796 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
3797 // CHECK5-NEXT:  entry:
3798 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3799 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3800 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
3801 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i64, align 8
3802 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3803 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3804 // CHECK5-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
3805 // CHECK5-NEXT:    store i64 [[Y]], i64* [[Y_ADDR]], align 8
3806 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
3807 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
3808 // CHECK5-NEXT:    ret void
3809 //
3810 //
3811 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
3812 // CHECK5-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
3813 // CHECK5-NEXT:  entry:
3814 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
3815 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
3816 // CHECK5-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
3817 // CHECK5-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
3818 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
3819 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
3820 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
3821 // CHECK5-NEXT:    ret void
3822 //
3823 //
3824 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
3825 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
3826 // CHECK5-NEXT:  entry:
3827 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3828 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3829 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
3830 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
3831 // CHECK5-NEXT:    [[X1:%.*]] = alloca i32, align 4
3832 // CHECK5-NEXT:    [[Y2:%.*]] = alloca i32, align 4
3833 // CHECK5-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
3834 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3835 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3836 // CHECK5-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
3837 // CHECK5-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
3838 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
3839 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
3840 // CHECK5-NEXT:    store i32 0, i32* [[X1]], align 4
3841 // CHECK5-NEXT:    store i32 0, i32* [[Y2]], align 4
3842 // CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
3843 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
3844 // CHECK5-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 8
3845 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
3846 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
3847 // CHECK5-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 8
3848 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3849 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
3850 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
3851 // CHECK5-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
3852 // CHECK5-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
3853 // CHECK5-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
3854 // CHECK5-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
3855 // CHECK5-NEXT:    ]
3856 // CHECK5:       .omp.reduction.case1:
3857 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
3858 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
3859 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3860 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
3861 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
3862 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
3863 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
3864 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
3865 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3866 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3867 // CHECK5:       .omp.reduction.case2:
3868 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
3869 // CHECK5-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
3870 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
3871 // CHECK5-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
3872 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
3873 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
3874 // CHECK5:       .omp.reduction.default:
3875 // CHECK5-NEXT:    ret void
3876 //
3877 //
3878 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
3879 // CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
3880 // CHECK5-NEXT:  entry:
3881 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
3882 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
3883 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
3884 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
3885 // CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
3886 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
3887 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
3888 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
3889 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
3890 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
3891 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
3892 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
3893 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
3894 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
3895 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
3896 // CHECK5-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
3897 // CHECK5-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
3898 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
3899 // CHECK5-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
3900 // CHECK5-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
3901 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
3902 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
3903 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
3904 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
3905 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
3906 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
3907 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
3908 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
3909 // CHECK5-NEXT:    ret void
3910 //
3911 //
3912 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
3913 // CHECK5-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
3914 // CHECK5-NEXT:  entry:
3915 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
3916 // CHECK5-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
3917 // CHECK5-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
3918 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
3919 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
3920 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
3921 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
3922 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
3923 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP2]])
3924 // CHECK5-NEXT:    ret void
3925 //
3926 //
3927 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
3928 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
3929 // CHECK5-NEXT:  entry:
3930 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3931 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3932 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
3933 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3934 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3935 // CHECK5-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
3936 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
3937 // CHECK5-NEXT:    ret void
3938 //
3939 //
3940 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
3941 // CHECK5-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
3942 // CHECK5-NEXT:  entry:
3943 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
3944 // CHECK5-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
3945 // CHECK5-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
3946 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
3947 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
3948 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
3949 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
3950 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
3951 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]])
3952 // CHECK5-NEXT:    ret void
3953 //
3954 //
3955 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..4
3956 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
3957 // CHECK5-NEXT:  entry:
3958 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3959 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3960 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
3961 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3962 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3963 // CHECK5-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
3964 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
3965 // CHECK5-NEXT:    ret void
3966 //
3967 //
3968 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
3969 // CHECK5-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
3970 // CHECK5-NEXT:  entry:
3971 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
3972 // CHECK5-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
3973 // CHECK5-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
3974 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
3975 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
3976 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
3977 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
3978 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
3979 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP2]])
3980 // CHECK5-NEXT:    ret void
3981 //
3982 //
3983 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..5
3984 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
3985 // CHECK5-NEXT:  entry:
3986 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3987 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3988 // CHECK5-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
3989 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3990 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3991 // CHECK5-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
3992 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
3993 // CHECK5-NEXT:    ret void
3994 //
3995 //
3996 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
3997 // CHECK5-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
3998 // CHECK5-NEXT:  entry:
3999 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4000 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4001 // CHECK5-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4002 // CHECK5-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4003 // CHECK5-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4004 // CHECK5-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4005 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
4006 // CHECK5-NEXT:    ret void
4007 //
4008 //
4009 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6
4010 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
4011 // CHECK5-NEXT:  entry:
4012 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4013 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4014 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4015 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4016 // CHECK5-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
4017 // CHECK5-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
4018 // CHECK5-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
4019 // CHECK5-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
4020 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4021 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4022 // CHECK5-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4023 // CHECK5-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4024 // CHECK5-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4025 // CHECK5-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4026 // CHECK5-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
4027 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
4028 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
4029 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
4030 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
4031 // CHECK5-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
4032 // CHECK5-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
4033 // CHECK5:       omp.arrayinit.body:
4034 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
4035 // CHECK5-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4036 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4037 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
4038 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
4039 // CHECK5:       omp.arrayinit.done:
4040 // CHECK5-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
4041 // CHECK5-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
4042 // CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
4043 // CHECK5-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
4044 // CHECK5-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
4045 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4046 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
4047 // CHECK5-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
4048 // CHECK5-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
4049 // CHECK5-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
4050 // CHECK5-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
4051 // CHECK5-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
4052 // CHECK5-NEXT:    ]
4053 // CHECK5:       .omp.reduction.case1:
4054 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4055 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
4056 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4057 // CHECK5:       omp.arraycpy.body:
4058 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4059 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4060 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4061 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4062 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4063 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4064 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
4065 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4066 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
4067 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
4068 // CHECK5:       omp.arraycpy.done6:
4069 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4070 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4071 // CHECK5:       .omp.reduction.case2:
4072 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4073 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
4074 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
4075 // CHECK5:       omp.arraycpy.body8:
4076 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4077 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4078 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
4079 // CHECK5-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
4080 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
4081 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
4082 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
4083 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
4084 // CHECK5:       omp.arraycpy.done14:
4085 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4086 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4087 // CHECK5:       .omp.reduction.default:
4088 // CHECK5-NEXT:    ret void
4089 //
4090 //
4091 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
4092 // CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
4093 // CHECK5-NEXT:  entry:
4094 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
4095 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
4096 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
4097 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
4098 // CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
4099 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
4100 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
4101 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
4102 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
4103 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
4104 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
4105 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
4106 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
4107 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
4108 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
4109 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
4110 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4111 // CHECK5:       omp.arraycpy.body:
4112 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4113 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4114 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4115 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4116 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
4117 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4118 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4119 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4120 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
4121 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
4122 // CHECK5:       omp.arraycpy.done2:
4123 // CHECK5-NEXT:    ret void
4124 //
4125 //
4126 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
4127 // CHECK5-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
4128 // CHECK5-NEXT:  entry:
4129 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4130 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4131 // CHECK5-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4132 // CHECK5-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4133 // CHECK5-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4134 // CHECK5-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4135 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
4136 // CHECK5-NEXT:    ret void
4137 //
4138 //
4139 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..8
4140 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
4141 // CHECK5-NEXT:  entry:
4142 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4143 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4144 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4145 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4146 // CHECK5-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
4147 // CHECK5-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
4148 // CHECK5-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
4149 // CHECK5-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
4150 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4151 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4152 // CHECK5-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4153 // CHECK5-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4154 // CHECK5-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4155 // CHECK5-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4156 // CHECK5-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
4157 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
4158 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
4159 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
4160 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
4161 // CHECK5-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
4162 // CHECK5-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
4163 // CHECK5:       omp.arrayinit.body:
4164 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
4165 // CHECK5-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4166 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4167 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
4168 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
4169 // CHECK5:       omp.arrayinit.done:
4170 // CHECK5-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
4171 // CHECK5-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
4172 // CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
4173 // CHECK5-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
4174 // CHECK5-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
4175 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4176 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
4177 // CHECK5-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
4178 // CHECK5-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
4179 // CHECK5-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
4180 // CHECK5-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
4181 // CHECK5-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
4182 // CHECK5-NEXT:    ]
4183 // CHECK5:       .omp.reduction.case1:
4184 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4185 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
4186 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4187 // CHECK5:       omp.arraycpy.body:
4188 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4189 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4190 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4191 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4192 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4193 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4194 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
4195 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4196 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
4197 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
4198 // CHECK5:       omp.arraycpy.done6:
4199 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4200 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4201 // CHECK5:       .omp.reduction.case2:
4202 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4203 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
4204 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
4205 // CHECK5:       omp.arraycpy.body8:
4206 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4207 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4208 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
4209 // CHECK5-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
4210 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
4211 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
4212 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
4213 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
4214 // CHECK5:       omp.arraycpy.done14:
4215 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4216 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4217 // CHECK5:       .omp.reduction.default:
4218 // CHECK5-NEXT:    ret void
4219 //
4220 //
4221 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
4222 // CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
4223 // CHECK5-NEXT:  entry:
4224 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
4225 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
4226 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
4227 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
4228 // CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
4229 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
4230 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
4231 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
4232 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
4233 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
4234 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
4235 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
4236 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
4237 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
4238 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
4239 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
4240 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4241 // CHECK5:       omp.arraycpy.body:
4242 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4243 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4244 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4245 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4246 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
4247 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4248 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4249 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4250 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
4251 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
4252 // CHECK5:       omp.arraycpy.done2:
4253 // CHECK5-NEXT:    ret void
4254 //
4255 //
4256 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72
4257 // CHECK5-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
4258 // CHECK5-NEXT:  entry:
4259 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
4260 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
4261 // CHECK5-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
4262 // CHECK5-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
4263 // CHECK5-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
4264 // CHECK5-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
4265 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
4266 // CHECK5-NEXT:    ret void
4267 //
4268 //
4269 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10
4270 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
4271 // CHECK5-NEXT:  entry:
4272 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4273 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4274 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
4275 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
4276 // CHECK5-NEXT:    [[Y1:%.*]] = alloca i128, align 16
4277 // CHECK5-NEXT:    [[X:%.*]] = alloca i128, align 16
4278 // CHECK5-NEXT:    [[Z2:%.*]] = alloca i128, align 16
4279 // CHECK5-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
4280 // CHECK5-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
4281 // CHECK5-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
4282 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i128, align 16
4283 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4284 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4285 // CHECK5-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
4286 // CHECK5-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
4287 // CHECK5-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
4288 // CHECK5-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
4289 // CHECK5-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
4290 // CHECK5-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
4291 // CHECK5-NEXT:    store i128 0, i128* [[Z2]], align 16
4292 // CHECK5-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
4293 // CHECK5-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
4294 // CHECK5-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
4295 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4296 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
4297 // CHECK5-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
4298 // CHECK5-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.11, [8 x i32]* @.gomp_critical_user_.reduction.var)
4299 // CHECK5-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
4300 // CHECK5-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
4301 // CHECK5-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
4302 // CHECK5-NEXT:    ]
4303 // CHECK5:       .omp.reduction.case1:
4304 // CHECK5-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
4305 // CHECK5-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
4306 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
4307 // CHECK5-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
4308 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4309 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4310 // CHECK5:       .omp.reduction.case2:
4311 // CHECK5-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
4312 // CHECK5-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
4313 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
4314 // CHECK5-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0) #[[ATTR6:[0-9]+]]
4315 // CHECK5-NEXT:    br label [[ATOMIC_CONT:%.*]]
4316 // CHECK5:       atomic_cont:
4317 // CHECK5-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
4318 // CHECK5-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
4319 // CHECK5-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
4320 // CHECK5-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
4321 // CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
4322 // CHECK5-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
4323 // CHECK5-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
4324 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
4325 // CHECK5-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
4326 // CHECK5-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0) #[[ATTR6]]
4327 // CHECK5-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
4328 // CHECK5:       atomic_exit:
4329 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4330 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4331 // CHECK5:       .omp.reduction.default:
4332 // CHECK5-NEXT:    ret void
4333 //
4334 //
4335 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.11
4336 // CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
4337 // CHECK5-NEXT:  entry:
4338 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
4339 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
4340 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
4341 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
4342 // CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
4343 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
4344 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
4345 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
4346 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
4347 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
4348 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
4349 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
4350 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
4351 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
4352 // CHECK5-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
4353 // CHECK5-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
4354 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
4355 // CHECK5-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
4356 // CHECK5-NEXT:    ret void
4357 //
4358 //
4359 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74
4360 // CHECK5-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
4361 // CHECK5-NEXT:  entry:
4362 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
4363 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
4364 // CHECK5-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
4365 // CHECK5-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
4366 // CHECK5-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
4367 // CHECK5-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
4368 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
4369 // CHECK5-NEXT:    ret void
4370 //
4371 //
4372 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..12
4373 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
4374 // CHECK5-NEXT:  entry:
4375 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4376 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4377 // CHECK5-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
4378 // CHECK5-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
4379 // CHECK5-NEXT:    [[Y1:%.*]] = alloca i128, align 16
4380 // CHECK5-NEXT:    [[X:%.*]] = alloca i128, align 16
4381 // CHECK5-NEXT:    [[Z2:%.*]] = alloca i128, align 16
4382 // CHECK5-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
4383 // CHECK5-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
4384 // CHECK5-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
4385 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i128, align 16
4386 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4387 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4388 // CHECK5-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
4389 // CHECK5-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
4390 // CHECK5-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
4391 // CHECK5-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
4392 // CHECK5-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
4393 // CHECK5-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
4394 // CHECK5-NEXT:    store i128 0, i128* [[Z2]], align 16
4395 // CHECK5-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
4396 // CHECK5-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
4397 // CHECK5-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
4398 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4399 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
4400 // CHECK5-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
4401 // CHECK5-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.13, [8 x i32]* @.gomp_critical_user_.reduction.var)
4402 // CHECK5-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
4403 // CHECK5-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
4404 // CHECK5-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
4405 // CHECK5-NEXT:    ]
4406 // CHECK5:       .omp.reduction.case1:
4407 // CHECK5-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
4408 // CHECK5-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
4409 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
4410 // CHECK5-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
4411 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4412 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4413 // CHECK5:       .omp.reduction.case2:
4414 // CHECK5-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
4415 // CHECK5-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
4416 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
4417 // CHECK5-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0) #[[ATTR6]]
4418 // CHECK5-NEXT:    br label [[ATOMIC_CONT:%.*]]
4419 // CHECK5:       atomic_cont:
4420 // CHECK5-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
4421 // CHECK5-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
4422 // CHECK5-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
4423 // CHECK5-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
4424 // CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
4425 // CHECK5-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
4426 // CHECK5-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
4427 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
4428 // CHECK5-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
4429 // CHECK5-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0) #[[ATTR6]]
4430 // CHECK5-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
4431 // CHECK5:       atomic_exit:
4432 // CHECK5-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4433 // CHECK5-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4434 // CHECK5:       .omp.reduction.default:
4435 // CHECK5-NEXT:    ret void
4436 //
4437 //
4438 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.13
4439 // CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
4440 // CHECK5-NEXT:  entry:
4441 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
4442 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
4443 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
4444 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
4445 // CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
4446 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
4447 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
4448 // CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
4449 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
4450 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
4451 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
4452 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
4453 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
4454 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
4455 // CHECK5-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
4456 // CHECK5-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
4457 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
4458 // CHECK5-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
4459 // CHECK5-NEXT:    ret void
4460 //
4461 //
4462 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
4463 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
4464 // CHECK6-NEXT:  entry:
4465 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
4466 // CHECK6-NEXT:    ret void
4467 //
4468 //
4469 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
4470 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4471 // CHECK6-NEXT:  entry:
4472 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4473 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4474 // CHECK6-NEXT:    [[X:%.*]] = alloca i32, align 4
4475 // CHECK6-NEXT:    [[Y:%.*]] = alloca i32, align 4
4476 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4477 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4478 // CHECK6-NEXT:    ret void
4479 //
4480 //
4481 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
4482 // CHECK6-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
4483 // CHECK6-NEXT:  entry:
4484 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
4485 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
4486 // CHECK6-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
4487 // CHECK6-NEXT:    [[Y_CASTED:%.*]] = alloca i64, align 8
4488 // CHECK6-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
4489 // CHECK6-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
4490 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
4491 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
4492 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
4493 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
4494 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
4495 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8
4496 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
4497 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
4498 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[CONV1]], align 4
4499 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8
4500 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]])
4501 // CHECK6-NEXT:    ret void
4502 //
4503 //
4504 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..1
4505 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
4506 // CHECK6-NEXT:  entry:
4507 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4508 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4509 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
4510 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i64, align 8
4511 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4512 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4513 // CHECK6-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
4514 // CHECK6-NEXT:    store i64 [[Y]], i64* [[Y_ADDR]], align 8
4515 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
4516 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
4517 // CHECK6-NEXT:    ret void
4518 //
4519 //
4520 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
4521 // CHECK6-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
4522 // CHECK6-NEXT:  entry:
4523 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
4524 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
4525 // CHECK6-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
4526 // CHECK6-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
4527 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
4528 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
4529 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
4530 // CHECK6-NEXT:    ret void
4531 //
4532 //
4533 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
4534 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
4535 // CHECK6-NEXT:  entry:
4536 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4537 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4538 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
4539 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 8
4540 // CHECK6-NEXT:    [[X1:%.*]] = alloca i32, align 4
4541 // CHECK6-NEXT:    [[Y2:%.*]] = alloca i32, align 4
4542 // CHECK6-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
4543 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4544 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4545 // CHECK6-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
4546 // CHECK6-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 8
4547 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
4548 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
4549 // CHECK6-NEXT:    store i32 0, i32* [[X1]], align 4
4550 // CHECK6-NEXT:    store i32 0, i32* [[Y2]], align 4
4551 // CHECK6-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
4552 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
4553 // CHECK6-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 8
4554 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
4555 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
4556 // CHECK6-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 8
4557 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4558 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
4559 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
4560 // CHECK6-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
4561 // CHECK6-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
4562 // CHECK6-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
4563 // CHECK6-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
4564 // CHECK6-NEXT:    ]
4565 // CHECK6:       .omp.reduction.case1:
4566 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
4567 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
4568 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
4569 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
4570 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
4571 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
4572 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4573 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
4574 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4575 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4576 // CHECK6:       .omp.reduction.case2:
4577 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
4578 // CHECK6-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
4579 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
4580 // CHECK6-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
4581 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4582 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4583 // CHECK6:       .omp.reduction.default:
4584 // CHECK6-NEXT:    ret void
4585 //
4586 //
4587 // CHECK6-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
4588 // CHECK6-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
4589 // CHECK6-NEXT:  entry:
4590 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
4591 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
4592 // CHECK6-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
4593 // CHECK6-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
4594 // CHECK6-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
4595 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
4596 // CHECK6-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
4597 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
4598 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
4599 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
4600 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
4601 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
4602 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
4603 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
4604 // CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
4605 // CHECK6-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
4606 // CHECK6-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
4607 // CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
4608 // CHECK6-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
4609 // CHECK6-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
4610 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
4611 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
4612 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
4613 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
4614 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
4615 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
4616 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
4617 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
4618 // CHECK6-NEXT:    ret void
4619 //
4620 //
4621 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
4622 // CHECK6-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
4623 // CHECK6-NEXT:  entry:
4624 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
4625 // CHECK6-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
4626 // CHECK6-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
4627 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
4628 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4629 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
4630 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
4631 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
4632 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP2]])
4633 // CHECK6-NEXT:    ret void
4634 //
4635 //
4636 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
4637 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
4638 // CHECK6-NEXT:  entry:
4639 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4640 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4641 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
4642 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4643 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4644 // CHECK6-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
4645 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
4646 // CHECK6-NEXT:    ret void
4647 //
4648 //
4649 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
4650 // CHECK6-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
4651 // CHECK6-NEXT:  entry:
4652 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
4653 // CHECK6-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
4654 // CHECK6-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
4655 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
4656 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4657 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
4658 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
4659 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
4660 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]])
4661 // CHECK6-NEXT:    ret void
4662 //
4663 //
4664 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..4
4665 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
4666 // CHECK6-NEXT:  entry:
4667 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4668 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4669 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
4670 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4671 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4672 // CHECK6-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
4673 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
4674 // CHECK6-NEXT:    ret void
4675 //
4676 //
4677 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
4678 // CHECK6-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
4679 // CHECK6-NEXT:  entry:
4680 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 8
4681 // CHECK6-NEXT:    [[X_CASTED:%.*]] = alloca i64, align 8
4682 // CHECK6-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 8
4683 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
4684 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4685 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
4686 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
4687 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
4688 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP2]])
4689 // CHECK6-NEXT:    ret void
4690 //
4691 //
4692 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..5
4693 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
4694 // CHECK6-NEXT:  entry:
4695 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4696 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4697 // CHECK6-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
4698 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4699 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4700 // CHECK6-NEXT:    store i64 [[X]], i64* [[X_ADDR]], align 8
4701 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
4702 // CHECK6-NEXT:    ret void
4703 //
4704 //
4705 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
4706 // CHECK6-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
4707 // CHECK6-NEXT:  entry:
4708 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4709 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4710 // CHECK6-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4711 // CHECK6-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4712 // CHECK6-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4713 // CHECK6-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4714 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
4715 // CHECK6-NEXT:    ret void
4716 //
4717 //
4718 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..6
4719 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
4720 // CHECK6-NEXT:  entry:
4721 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4722 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4723 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4724 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4725 // CHECK6-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
4726 // CHECK6-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
4727 // CHECK6-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
4728 // CHECK6-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
4729 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4730 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4731 // CHECK6-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4732 // CHECK6-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4733 // CHECK6-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4734 // CHECK6-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4735 // CHECK6-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
4736 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
4737 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
4738 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
4739 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
4740 // CHECK6-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
4741 // CHECK6-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
4742 // CHECK6:       omp.arrayinit.body:
4743 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
4744 // CHECK6-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4745 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4746 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
4747 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
4748 // CHECK6:       omp.arrayinit.done:
4749 // CHECK6-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
4750 // CHECK6-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
4751 // CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
4752 // CHECK6-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
4753 // CHECK6-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
4754 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4755 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
4756 // CHECK6-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
4757 // CHECK6-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
4758 // CHECK6-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
4759 // CHECK6-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
4760 // CHECK6-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
4761 // CHECK6-NEXT:    ]
4762 // CHECK6:       .omp.reduction.case1:
4763 // CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4764 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
4765 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4766 // CHECK6:       omp.arraycpy.body:
4767 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4768 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4769 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4770 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4771 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4772 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4773 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
4774 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4775 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
4776 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
4777 // CHECK6:       omp.arraycpy.done6:
4778 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4779 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4780 // CHECK6:       .omp.reduction.case2:
4781 // CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4782 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
4783 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
4784 // CHECK6:       omp.arraycpy.body8:
4785 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4786 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4787 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
4788 // CHECK6-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
4789 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
4790 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
4791 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
4792 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
4793 // CHECK6:       omp.arraycpy.done14:
4794 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4795 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4796 // CHECK6:       .omp.reduction.default:
4797 // CHECK6-NEXT:    ret void
4798 //
4799 //
4800 // CHECK6-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
4801 // CHECK6-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
4802 // CHECK6-NEXT:  entry:
4803 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
4804 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
4805 // CHECK6-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
4806 // CHECK6-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
4807 // CHECK6-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
4808 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
4809 // CHECK6-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
4810 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
4811 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
4812 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
4813 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
4814 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
4815 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
4816 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
4817 // CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
4818 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
4819 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4820 // CHECK6:       omp.arraycpy.body:
4821 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4822 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4823 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4824 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4825 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
4826 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4827 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4828 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4829 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
4830 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
4831 // CHECK6:       omp.arraycpy.done2:
4832 // CHECK6-NEXT:    ret void
4833 //
4834 //
4835 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
4836 // CHECK6-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
4837 // CHECK6-NEXT:  entry:
4838 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4839 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4840 // CHECK6-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4841 // CHECK6-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4842 // CHECK6-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4843 // CHECK6-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4844 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
4845 // CHECK6-NEXT:    ret void
4846 //
4847 //
4848 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..8
4849 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
4850 // CHECK6-NEXT:  entry:
4851 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4852 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4853 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
4854 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
4855 // CHECK6-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
4856 // CHECK6-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
4857 // CHECK6-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
4858 // CHECK6-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
4859 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4860 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4861 // CHECK6-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
4862 // CHECK6-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
4863 // CHECK6-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
4864 // CHECK6-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
4865 // CHECK6-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
4866 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
4867 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
4868 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
4869 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
4870 // CHECK6-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
4871 // CHECK6-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
4872 // CHECK6:       omp.arrayinit.body:
4873 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
4874 // CHECK6-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4875 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4876 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
4877 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
4878 // CHECK6:       omp.arrayinit.done:
4879 // CHECK6-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
4880 // CHECK6-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
4881 // CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
4882 // CHECK6-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
4883 // CHECK6-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
4884 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4885 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
4886 // CHECK6-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
4887 // CHECK6-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
4888 // CHECK6-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
4889 // CHECK6-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
4890 // CHECK6-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
4891 // CHECK6-NEXT:    ]
4892 // CHECK6:       .omp.reduction.case1:
4893 // CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4894 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
4895 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4896 // CHECK6:       omp.arraycpy.body:
4897 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4898 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4899 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4900 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4901 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4902 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
4903 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
4904 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4905 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
4906 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
4907 // CHECK6:       omp.arraycpy.done6:
4908 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4909 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4910 // CHECK6:       .omp.reduction.case2:
4911 // CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
4912 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
4913 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
4914 // CHECK6:       omp.arraycpy.body8:
4915 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4916 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
4917 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
4918 // CHECK6-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
4919 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
4920 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
4921 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
4922 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
4923 // CHECK6:       omp.arraycpy.done14:
4924 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
4925 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
4926 // CHECK6:       .omp.reduction.default:
4927 // CHECK6-NEXT:    ret void
4928 //
4929 //
4930 // CHECK6-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
4931 // CHECK6-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
4932 // CHECK6-NEXT:  entry:
4933 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
4934 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
4935 // CHECK6-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
4936 // CHECK6-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
4937 // CHECK6-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
4938 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
4939 // CHECK6-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
4940 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
4941 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
4942 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
4943 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
4944 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
4945 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
4946 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
4947 // CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
4948 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
4949 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4950 // CHECK6:       omp.arraycpy.body:
4951 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4952 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4953 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4954 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
4955 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
4956 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
4957 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4958 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4959 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
4960 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
4961 // CHECK6:       omp.arraycpy.done2:
4962 // CHECK6-NEXT:    ret void
4963 //
4964 //
4965 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72
4966 // CHECK6-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
4967 // CHECK6-NEXT:  entry:
4968 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
4969 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
4970 // CHECK6-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
4971 // CHECK6-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
4972 // CHECK6-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
4973 // CHECK6-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
4974 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
4975 // CHECK6-NEXT:    ret void
4976 //
4977 //
4978 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..10
4979 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
4980 // CHECK6-NEXT:  entry:
4981 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4982 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4983 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
4984 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
4985 // CHECK6-NEXT:    [[Y1:%.*]] = alloca i128, align 16
4986 // CHECK6-NEXT:    [[X:%.*]] = alloca i128, align 16
4987 // CHECK6-NEXT:    [[Z2:%.*]] = alloca i128, align 16
4988 // CHECK6-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
4989 // CHECK6-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
4990 // CHECK6-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
4991 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i128, align 16
4992 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4993 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4994 // CHECK6-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
4995 // CHECK6-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
4996 // CHECK6-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
4997 // CHECK6-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
4998 // CHECK6-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
4999 // CHECK6-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
5000 // CHECK6-NEXT:    store i128 0, i128* [[Z2]], align 16
5001 // CHECK6-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
5002 // CHECK6-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
5003 // CHECK6-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
5004 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5005 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
5006 // CHECK6-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
5007 // CHECK6-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.11, [8 x i32]* @.gomp_critical_user_.reduction.var)
5008 // CHECK6-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
5009 // CHECK6-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
5010 // CHECK6-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
5011 // CHECK6-NEXT:    ]
5012 // CHECK6:       .omp.reduction.case1:
5013 // CHECK6-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
5014 // CHECK6-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
5015 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
5016 // CHECK6-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
5017 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5018 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5019 // CHECK6:       .omp.reduction.case2:
5020 // CHECK6-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
5021 // CHECK6-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
5022 // CHECK6-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
5023 // CHECK6-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0) #[[ATTR6:[0-9]+]]
5024 // CHECK6-NEXT:    br label [[ATOMIC_CONT:%.*]]
5025 // CHECK6:       atomic_cont:
5026 // CHECK6-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
5027 // CHECK6-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
5028 // CHECK6-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
5029 // CHECK6-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
5030 // CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
5031 // CHECK6-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
5032 // CHECK6-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
5033 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
5034 // CHECK6-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
5035 // CHECK6-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0) #[[ATTR6]]
5036 // CHECK6-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
5037 // CHECK6:       atomic_exit:
5038 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5039 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5040 // CHECK6:       .omp.reduction.default:
5041 // CHECK6-NEXT:    ret void
5042 //
5043 //
5044 // CHECK6-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.11
5045 // CHECK6-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
5046 // CHECK6-NEXT:  entry:
5047 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
5048 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
5049 // CHECK6-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
5050 // CHECK6-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
5051 // CHECK6-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
5052 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
5053 // CHECK6-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
5054 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
5055 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
5056 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
5057 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
5058 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
5059 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
5060 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
5061 // CHECK6-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
5062 // CHECK6-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
5063 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
5064 // CHECK6-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
5065 // CHECK6-NEXT:    ret void
5066 //
5067 //
5068 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74
5069 // CHECK6-SAME: (i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
5070 // CHECK6-NEXT:  entry:
5071 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
5072 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
5073 // CHECK6-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
5074 // CHECK6-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
5075 // CHECK6-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
5076 // CHECK6-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
5077 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
5078 // CHECK6-NEXT:    ret void
5079 //
5080 //
5081 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..12
5082 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i128* nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
5083 // CHECK6-NEXT:  entry:
5084 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5085 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5086 // CHECK6-NEXT:    [[Y_ADDR:%.*]] = alloca i128*, align 8
5087 // CHECK6-NEXT:    [[Z_ADDR:%.*]] = alloca i128*, align 8
5088 // CHECK6-NEXT:    [[Y1:%.*]] = alloca i128, align 16
5089 // CHECK6-NEXT:    [[X:%.*]] = alloca i128, align 16
5090 // CHECK6-NEXT:    [[Z2:%.*]] = alloca i128, align 16
5091 // CHECK6-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
5092 // CHECK6-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
5093 // CHECK6-NEXT:    [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
5094 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i128, align 16
5095 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5096 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5097 // CHECK6-NEXT:    store i128* [[Y]], i128** [[Y_ADDR]], align 8
5098 // CHECK6-NEXT:    store i128* [[Z]], i128** [[Z_ADDR]], align 8
5099 // CHECK6-NEXT:    [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
5100 // CHECK6-NEXT:    [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
5101 // CHECK6-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
5102 // CHECK6-NEXT:    store i128 [[TMP2]], i128* [[Y1]], align 16
5103 // CHECK6-NEXT:    store i128 0, i128* [[Z2]], align 16
5104 // CHECK6-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
5105 // CHECK6-NEXT:    [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
5106 // CHECK6-NEXT:    store i8* [[TMP4]], i8** [[TMP3]], align 8
5107 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5108 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
5109 // CHECK6-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
5110 // CHECK6-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.13, [8 x i32]* @.gomp_critical_user_.reduction.var)
5111 // CHECK6-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
5112 // CHECK6-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
5113 // CHECK6-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
5114 // CHECK6-NEXT:    ]
5115 // CHECK6:       .omp.reduction.case1:
5116 // CHECK6-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
5117 // CHECK6-NEXT:    [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
5118 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
5119 // CHECK6-NEXT:    store i128 [[ADD]], i128* [[TMP1]], align 16
5120 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5121 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5122 // CHECK6:       .omp.reduction.case2:
5123 // CHECK6-NEXT:    [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
5124 // CHECK6-NEXT:    [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
5125 // CHECK6-NEXT:    [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
5126 // CHECK6-NEXT:    call void @__atomic_load(i64 16, i8* [[TMP12]], i8* [[TMP13]], i32 signext 0) #[[ATTR6]]
5127 // CHECK6-NEXT:    br label [[ATOMIC_CONT:%.*]]
5128 // CHECK6:       atomic_cont:
5129 // CHECK6-NEXT:    [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
5130 // CHECK6-NEXT:    store i128 [[TMP14]], i128* [[TMP]], align 16
5131 // CHECK6-NEXT:    [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
5132 // CHECK6-NEXT:    [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
5133 // CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
5134 // CHECK6-NEXT:    store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
5135 // CHECK6-NEXT:    [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
5136 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
5137 // CHECK6-NEXT:    [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
5138 // CHECK6-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* [[TMP17]], i8* [[TMP18]], i8* [[TMP19]], i32 signext 0, i32 signext 0) #[[ATTR6]]
5139 // CHECK6-NEXT:    br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
5140 // CHECK6:       atomic_exit:
5141 // CHECK6-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5142 // CHECK6-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5143 // CHECK6:       .omp.reduction.default:
5144 // CHECK6-NEXT:    ret void
5145 //
5146 //
5147 // CHECK6-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.13
5148 // CHECK6-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
5149 // CHECK6-NEXT:  entry:
5150 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
5151 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
5152 // CHECK6-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
5153 // CHECK6-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
5154 // CHECK6-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
5155 // CHECK6-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
5156 // CHECK6-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
5157 // CHECK6-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
5158 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
5159 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
5160 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
5161 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
5162 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
5163 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
5164 // CHECK6-NEXT:    [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
5165 // CHECK6-NEXT:    [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
5166 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
5167 // CHECK6-NEXT:    store i128 [[ADD]], i128* [[TMP11]], align 16
5168 // CHECK6-NEXT:    ret void
5169 //
5170 //
5171 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
5172 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
5173 // CHECK7-NEXT:  entry:
5174 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
5175 // CHECK7-NEXT:    ret void
5176 //
5177 //
5178 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
5179 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
5180 // CHECK7-NEXT:  entry:
5181 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5182 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5183 // CHECK7-NEXT:    [[X:%.*]] = alloca i32, align 4
5184 // CHECK7-NEXT:    [[Y:%.*]] = alloca i32, align 4
5185 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5186 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5187 // CHECK7-NEXT:    ret void
5188 //
5189 //
5190 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
5191 // CHECK7-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
5192 // CHECK7-NEXT:  entry:
5193 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5194 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
5195 // CHECK7-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5196 // CHECK7-NEXT:    [[Y_CASTED:%.*]] = alloca i32, align 4
5197 // CHECK7-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5198 // CHECK7-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
5199 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5200 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
5201 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
5202 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[X_CASTED]], align 4
5203 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4
5204 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
5205 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[Y_CASTED]], align 4
5206 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4
5207 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]])
5208 // CHECK7-NEXT:    ret void
5209 //
5210 //
5211 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..1
5212 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
5213 // CHECK7-NEXT:  entry:
5214 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5215 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5216 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5217 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca i32, align 4
5218 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5219 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5220 // CHECK7-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5221 // CHECK7-NEXT:    store i32 [[Y]], i32* [[Y_ADDR]], align 4
5222 // CHECK7-NEXT:    ret void
5223 //
5224 //
5225 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
5226 // CHECK7-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
5227 // CHECK7-NEXT:  entry:
5228 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5229 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
5230 // CHECK7-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5231 // CHECK7-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
5232 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5233 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
5234 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
5235 // CHECK7-NEXT:    ret void
5236 //
5237 //
5238 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
5239 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
5240 // CHECK7-NEXT:  entry:
5241 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5242 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5243 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5244 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
5245 // CHECK7-NEXT:    [[X1:%.*]] = alloca i32, align 4
5246 // CHECK7-NEXT:    [[Y2:%.*]] = alloca i32, align 4
5247 // CHECK7-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
5248 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5249 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5250 // CHECK7-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5251 // CHECK7-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
5252 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5253 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
5254 // CHECK7-NEXT:    store i32 0, i32* [[X1]], align 4
5255 // CHECK7-NEXT:    store i32 0, i32* [[Y2]], align 4
5256 // CHECK7-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
5257 // CHECK7-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
5258 // CHECK7-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 4
5259 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
5260 // CHECK7-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
5261 // CHECK7-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 4
5262 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5263 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
5264 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
5265 // CHECK7-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
5266 // CHECK7-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
5267 // CHECK7-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
5268 // CHECK7-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
5269 // CHECK7-NEXT:    ]
5270 // CHECK7:       .omp.reduction.case1:
5271 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
5272 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
5273 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
5274 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
5275 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
5276 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
5277 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5278 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
5279 // CHECK7-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5280 // CHECK7-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5281 // CHECK7:       .omp.reduction.case2:
5282 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
5283 // CHECK7-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
5284 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
5285 // CHECK7-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
5286 // CHECK7-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5287 // CHECK7-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5288 // CHECK7:       .omp.reduction.default:
5289 // CHECK7-NEXT:    ret void
5290 //
5291 //
5292 // CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
5293 // CHECK7-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
5294 // CHECK7-NEXT:  entry:
5295 // CHECK7-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
5296 // CHECK7-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
5297 // CHECK7-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
5298 // CHECK7-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
5299 // CHECK7-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
5300 // CHECK7-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
5301 // CHECK7-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
5302 // CHECK7-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
5303 // CHECK7-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
5304 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
5305 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
5306 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0
5307 // CHECK7-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
5308 // CHECK7-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
5309 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
5310 // CHECK7-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
5311 // CHECK7-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
5312 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1
5313 // CHECK7-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
5314 // CHECK7-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
5315 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
5316 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
5317 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
5318 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
5319 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
5320 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
5321 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5322 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
5323 // CHECK7-NEXT:    ret void
5324 //
5325 //
5326 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
5327 // CHECK7-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
5328 // CHECK7-NEXT:  entry:
5329 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5330 // CHECK7-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5331 // CHECK7-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5332 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5333 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5334 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
5335 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
5336 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP2]])
5337 // CHECK7-NEXT:    ret void
5338 //
5339 //
5340 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3
5341 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
5342 // CHECK7-NEXT:  entry:
5343 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5344 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5345 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5346 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5347 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5348 // CHECK7-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5349 // CHECK7-NEXT:    ret void
5350 //
5351 //
5352 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
5353 // CHECK7-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
5354 // CHECK7-NEXT:  entry:
5355 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5356 // CHECK7-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5357 // CHECK7-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5358 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5359 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5360 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
5361 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
5362 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]])
5363 // CHECK7-NEXT:    ret void
5364 //
5365 //
5366 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..4
5367 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
5368 // CHECK7-NEXT:  entry:
5369 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5370 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5371 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5372 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5373 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5374 // CHECK7-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5375 // CHECK7-NEXT:    ret void
5376 //
5377 //
5378 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
5379 // CHECK7-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
5380 // CHECK7-NEXT:  entry:
5381 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5382 // CHECK7-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5383 // CHECK7-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5384 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5385 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5386 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
5387 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
5388 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP2]])
5389 // CHECK7-NEXT:    ret void
5390 //
5391 //
5392 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..5
5393 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
5394 // CHECK7-NEXT:  entry:
5395 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5396 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5397 // CHECK7-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5398 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5399 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5400 // CHECK7-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5401 // CHECK7-NEXT:    ret void
5402 //
5403 //
5404 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
5405 // CHECK7-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
5406 // CHECK7-NEXT:  entry:
5407 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
5408 // CHECK7-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
5409 // CHECK7-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
5410 // CHECK7-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
5411 // CHECK7-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
5412 // CHECK7-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
5413 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
5414 // CHECK7-NEXT:    ret void
5415 //
5416 //
5417 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..6
5418 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
5419 // CHECK7-NEXT:  entry:
5420 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5421 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5422 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
5423 // CHECK7-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
5424 // CHECK7-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
5425 // CHECK7-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
5426 // CHECK7-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
5427 // CHECK7-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
5428 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5429 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5430 // CHECK7-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
5431 // CHECK7-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
5432 // CHECK7-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
5433 // CHECK7-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
5434 // CHECK7-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
5435 // CHECK7-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
5436 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
5437 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
5438 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
5439 // CHECK7-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
5440 // CHECK7-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
5441 // CHECK7:       omp.arrayinit.body:
5442 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
5443 // CHECK7-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
5444 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5445 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
5446 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
5447 // CHECK7:       omp.arrayinit.done:
5448 // CHECK7-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
5449 // CHECK7-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
5450 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
5451 // CHECK7-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
5452 // CHECK7-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
5453 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5454 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
5455 // CHECK7-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
5456 // CHECK7-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
5457 // CHECK7-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
5458 // CHECK7-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
5459 // CHECK7-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
5460 // CHECK7-NEXT:    ]
5461 // CHECK7:       .omp.reduction.case1:
5462 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
5463 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
5464 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5465 // CHECK7:       omp.arraycpy.body:
5466 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5467 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5468 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
5469 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
5470 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5471 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
5472 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
5473 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5474 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
5475 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
5476 // CHECK7:       omp.arraycpy.done6:
5477 // CHECK7-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5478 // CHECK7-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5479 // CHECK7:       .omp.reduction.case2:
5480 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
5481 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
5482 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
5483 // CHECK7:       omp.arraycpy.body8:
5484 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
5485 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
5486 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
5487 // CHECK7-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
5488 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
5489 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
5490 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
5491 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
5492 // CHECK7:       omp.arraycpy.done14:
5493 // CHECK7-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5494 // CHECK7-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5495 // CHECK7:       .omp.reduction.default:
5496 // CHECK7-NEXT:    ret void
5497 //
5498 //
5499 // CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
5500 // CHECK7-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
5501 // CHECK7-NEXT:  entry:
5502 // CHECK7-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
5503 // CHECK7-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
5504 // CHECK7-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
5505 // CHECK7-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
5506 // CHECK7-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
5507 // CHECK7-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
5508 // CHECK7-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
5509 // CHECK7-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
5510 // CHECK7-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
5511 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
5512 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
5513 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
5514 // CHECK7-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
5515 // CHECK7-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
5516 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
5517 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
5518 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5519 // CHECK7:       omp.arraycpy.body:
5520 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5521 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5522 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
5523 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
5524 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
5525 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
5526 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5527 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5528 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
5529 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
5530 // CHECK7:       omp.arraycpy.done2:
5531 // CHECK7-NEXT:    ret void
5532 //
5533 //
5534 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
5535 // CHECK7-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
5536 // CHECK7-NEXT:  entry:
5537 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
5538 // CHECK7-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
5539 // CHECK7-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
5540 // CHECK7-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
5541 // CHECK7-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
5542 // CHECK7-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
5543 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
5544 // CHECK7-NEXT:    ret void
5545 //
5546 //
5547 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..8
5548 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
5549 // CHECK7-NEXT:  entry:
5550 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5551 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5552 // CHECK7-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
5553 // CHECK7-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
5554 // CHECK7-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
5555 // CHECK7-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
5556 // CHECK7-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
5557 // CHECK7-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
5558 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5559 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5560 // CHECK7-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
5561 // CHECK7-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
5562 // CHECK7-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
5563 // CHECK7-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
5564 // CHECK7-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
5565 // CHECK7-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
5566 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
5567 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
5568 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
5569 // CHECK7-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
5570 // CHECK7-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
5571 // CHECK7:       omp.arrayinit.body:
5572 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
5573 // CHECK7-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
5574 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5575 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
5576 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
5577 // CHECK7:       omp.arrayinit.done:
5578 // CHECK7-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
5579 // CHECK7-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
5580 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
5581 // CHECK7-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
5582 // CHECK7-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
5583 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5584 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
5585 // CHECK7-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
5586 // CHECK7-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
5587 // CHECK7-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
5588 // CHECK7-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
5589 // CHECK7-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
5590 // CHECK7-NEXT:    ]
5591 // CHECK7:       .omp.reduction.case1:
5592 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
5593 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
5594 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5595 // CHECK7:       omp.arraycpy.body:
5596 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5597 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5598 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
5599 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
5600 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5601 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
5602 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
5603 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5604 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
5605 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
5606 // CHECK7:       omp.arraycpy.done6:
5607 // CHECK7-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5608 // CHECK7-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5609 // CHECK7:       .omp.reduction.case2:
5610 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
5611 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
5612 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
5613 // CHECK7:       omp.arraycpy.body8:
5614 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
5615 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
5616 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
5617 // CHECK7-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
5618 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
5619 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
5620 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
5621 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
5622 // CHECK7:       omp.arraycpy.done14:
5623 // CHECK7-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5624 // CHECK7-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5625 // CHECK7:       .omp.reduction.default:
5626 // CHECK7-NEXT:    ret void
5627 //
5628 //
5629 // CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
5630 // CHECK7-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
5631 // CHECK7-NEXT:  entry:
5632 // CHECK7-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
5633 // CHECK7-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
5634 // CHECK7-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
5635 // CHECK7-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
5636 // CHECK7-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
5637 // CHECK7-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
5638 // CHECK7-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
5639 // CHECK7-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
5640 // CHECK7-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
5641 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
5642 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
5643 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
5644 // CHECK7-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
5645 // CHECK7-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
5646 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
5647 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
5648 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5649 // CHECK7:       omp.arraycpy.body:
5650 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5651 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5652 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
5653 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
5654 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
5655 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
5656 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5657 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5658 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
5659 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
5660 // CHECK7:       omp.arraycpy.done2:
5661 // CHECK7-NEXT:    ret void
5662 //
5663 //
5664 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
5665 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
5666 // CHECK8-NEXT:  entry:
5667 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
5668 // CHECK8-NEXT:    ret void
5669 //
5670 //
5671 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
5672 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
5673 // CHECK8-NEXT:  entry:
5674 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5675 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5676 // CHECK8-NEXT:    [[X:%.*]] = alloca i32, align 4
5677 // CHECK8-NEXT:    [[Y:%.*]] = alloca i32, align 4
5678 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5679 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5680 // CHECK8-NEXT:    ret void
5681 //
5682 //
5683 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
5684 // CHECK8-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
5685 // CHECK8-NEXT:  entry:
5686 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5687 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
5688 // CHECK8-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5689 // CHECK8-NEXT:    [[Y_CASTED:%.*]] = alloca i32, align 4
5690 // CHECK8-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5691 // CHECK8-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
5692 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5693 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
5694 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
5695 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[X_CASTED]], align 4
5696 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4
5697 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
5698 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[Y_CASTED]], align 4
5699 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4
5700 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]])
5701 // CHECK8-NEXT:    ret void
5702 //
5703 //
5704 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..1
5705 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
5706 // CHECK8-NEXT:  entry:
5707 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5708 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5709 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5710 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca i32, align 4
5711 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5712 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5713 // CHECK8-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5714 // CHECK8-NEXT:    store i32 [[Y]], i32* [[Y_ADDR]], align 4
5715 // CHECK8-NEXT:    ret void
5716 //
5717 //
5718 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
5719 // CHECK8-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
5720 // CHECK8-NEXT:  entry:
5721 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5722 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
5723 // CHECK8-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5724 // CHECK8-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
5725 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5726 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
5727 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
5728 // CHECK8-NEXT:    ret void
5729 //
5730 //
5731 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..2
5732 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[X:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
5733 // CHECK8-NEXT:  entry:
5734 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5735 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5736 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5737 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca i32*, align 4
5738 // CHECK8-NEXT:    [[X1:%.*]] = alloca i32, align 4
5739 // CHECK8-NEXT:    [[Y2:%.*]] = alloca i32, align 4
5740 // CHECK8-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
5741 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5742 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5743 // CHECK8-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5744 // CHECK8-NEXT:    store i32* [[Y]], i32** [[Y_ADDR]], align 4
5745 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5746 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
5747 // CHECK8-NEXT:    store i32 0, i32* [[X1]], align 4
5748 // CHECK8-NEXT:    store i32 0, i32* [[Y2]], align 4
5749 // CHECK8-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
5750 // CHECK8-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
5751 // CHECK8-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 4
5752 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
5753 // CHECK8-NEXT:    [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
5754 // CHECK8-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 4
5755 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5756 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
5757 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
5758 // CHECK8-NEXT:    [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
5759 // CHECK8-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
5760 // CHECK8-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
5761 // CHECK8-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
5762 // CHECK8-NEXT:    ]
5763 // CHECK8:       .omp.reduction.case1:
5764 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
5765 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
5766 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
5767 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
5768 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
5769 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
5770 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5771 // CHECK8-NEXT:    store i32 [[ADD3]], i32* [[TMP1]], align 4
5772 // CHECK8-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5773 // CHECK8-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5774 // CHECK8:       .omp.reduction.case2:
5775 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
5776 // CHECK8-NEXT:    [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
5777 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
5778 // CHECK8-NEXT:    [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
5779 // CHECK8-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5780 // CHECK8-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5781 // CHECK8:       .omp.reduction.default:
5782 // CHECK8-NEXT:    ret void
5783 //
5784 //
5785 // CHECK8-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
5786 // CHECK8-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
5787 // CHECK8-NEXT:  entry:
5788 // CHECK8-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
5789 // CHECK8-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
5790 // CHECK8-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
5791 // CHECK8-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
5792 // CHECK8-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
5793 // CHECK8-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
5794 // CHECK8-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
5795 // CHECK8-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
5796 // CHECK8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
5797 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
5798 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
5799 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0
5800 // CHECK8-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
5801 // CHECK8-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
5802 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
5803 // CHECK8-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
5804 // CHECK8-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
5805 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1
5806 // CHECK8-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
5807 // CHECK8-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
5808 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
5809 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
5810 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
5811 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
5812 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
5813 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
5814 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5815 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[TMP17]], align 4
5816 // CHECK8-NEXT:    ret void
5817 //
5818 //
5819 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
5820 // CHECK8-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
5821 // CHECK8-NEXT:  entry:
5822 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5823 // CHECK8-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5824 // CHECK8-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5825 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5826 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5827 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
5828 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
5829 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP2]])
5830 // CHECK8-NEXT:    ret void
5831 //
5832 //
5833 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..3
5834 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
5835 // CHECK8-NEXT:  entry:
5836 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5837 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5838 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5839 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5840 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5841 // CHECK8-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5842 // CHECK8-NEXT:    ret void
5843 //
5844 //
5845 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
5846 // CHECK8-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
5847 // CHECK8-NEXT:  entry:
5848 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5849 // CHECK8-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5850 // CHECK8-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5851 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5852 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5853 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
5854 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
5855 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]])
5856 // CHECK8-NEXT:    ret void
5857 //
5858 //
5859 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..4
5860 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
5861 // CHECK8-NEXT:  entry:
5862 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5863 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5864 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5865 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5866 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5867 // CHECK8-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5868 // CHECK8-NEXT:    ret void
5869 //
5870 //
5871 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
5872 // CHECK8-SAME: (i32* nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
5873 // CHECK8-NEXT:  entry:
5874 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32*, align 4
5875 // CHECK8-NEXT:    [[X_CASTED:%.*]] = alloca i32, align 4
5876 // CHECK8-NEXT:    store i32* [[X]], i32** [[X_ADDR]], align 4
5877 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
5878 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5879 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[X_CASTED]], align 4
5880 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
5881 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP2]])
5882 // CHECK8-NEXT:    ret void
5883 //
5884 //
5885 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..5
5886 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
5887 // CHECK8-NEXT:  entry:
5888 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5889 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5890 // CHECK8-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
5891 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5892 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5893 // CHECK8-NEXT:    store i32 [[X]], i32* [[X_ADDR]], align 4
5894 // CHECK8-NEXT:    ret void
5895 //
5896 //
5897 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
5898 // CHECK8-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
5899 // CHECK8-NEXT:  entry:
5900 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
5901 // CHECK8-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
5902 // CHECK8-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
5903 // CHECK8-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
5904 // CHECK8-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
5905 // CHECK8-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
5906 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
5907 // CHECK8-NEXT:    ret void
5908 //
5909 //
5910 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..6
5911 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
5912 // CHECK8-NEXT:  entry:
5913 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5914 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5915 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
5916 // CHECK8-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
5917 // CHECK8-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
5918 // CHECK8-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
5919 // CHECK8-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
5920 // CHECK8-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
5921 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5922 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5923 // CHECK8-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
5924 // CHECK8-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
5925 // CHECK8-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
5926 // CHECK8-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
5927 // CHECK8-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
5928 // CHECK8-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
5929 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
5930 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
5931 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
5932 // CHECK8-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
5933 // CHECK8-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
5934 // CHECK8:       omp.arrayinit.body:
5935 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
5936 // CHECK8-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
5937 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5938 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
5939 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
5940 // CHECK8:       omp.arrayinit.done:
5941 // CHECK8-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
5942 // CHECK8-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
5943 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
5944 // CHECK8-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
5945 // CHECK8-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
5946 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5947 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
5948 // CHECK8-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
5949 // CHECK8-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
5950 // CHECK8-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
5951 // CHECK8-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
5952 // CHECK8-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
5953 // CHECK8-NEXT:    ]
5954 // CHECK8:       .omp.reduction.case1:
5955 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
5956 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
5957 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5958 // CHECK8:       omp.arraycpy.body:
5959 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5960 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5961 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
5962 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
5963 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
5964 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
5965 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
5966 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5967 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
5968 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
5969 // CHECK8:       omp.arraycpy.done6:
5970 // CHECK8-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5971 // CHECK8-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5972 // CHECK8:       .omp.reduction.case2:
5973 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
5974 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
5975 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
5976 // CHECK8:       omp.arraycpy.body8:
5977 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
5978 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
5979 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
5980 // CHECK8-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
5981 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
5982 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
5983 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
5984 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
5985 // CHECK8:       omp.arraycpy.done14:
5986 // CHECK8-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
5987 // CHECK8-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
5988 // CHECK8:       .omp.reduction.default:
5989 // CHECK8-NEXT:    ret void
5990 //
5991 //
5992 // CHECK8-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
5993 // CHECK8-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
5994 // CHECK8-NEXT:  entry:
5995 // CHECK8-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
5996 // CHECK8-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
5997 // CHECK8-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
5998 // CHECK8-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
5999 // CHECK8-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
6000 // CHECK8-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
6001 // CHECK8-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
6002 // CHECK8-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
6003 // CHECK8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
6004 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
6005 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
6006 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
6007 // CHECK8-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
6008 // CHECK8-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
6009 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
6010 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
6011 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6012 // CHECK8:       omp.arraycpy.body:
6013 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6014 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6015 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
6016 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
6017 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
6018 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
6019 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6020 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6021 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
6022 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
6023 // CHECK8:       omp.arraycpy.done2:
6024 // CHECK8-NEXT:    ret void
6025 //
6026 //
6027 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
6028 // CHECK8-SAME: ([88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
6029 // CHECK8-NEXT:  entry:
6030 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
6031 // CHECK8-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
6032 // CHECK8-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
6033 // CHECK8-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
6034 // CHECK8-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
6035 // CHECK8-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
6036 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
6037 // CHECK8-NEXT:    ret void
6038 //
6039 //
6040 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..8
6041 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [88 x i32]* nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
6042 // CHECK8-NEXT:  entry:
6043 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6044 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6045 // CHECK8-NEXT:    [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
6046 // CHECK8-NEXT:    [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
6047 // CHECK8-NEXT:    [[Y1:%.*]] = alloca [88 x i32], align 4
6048 // CHECK8-NEXT:    [[X:%.*]] = alloca [77 x i32], align 4
6049 // CHECK8-NEXT:    [[Z2:%.*]] = alloca [99 x i32], align 4
6050 // CHECK8-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
6051 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6052 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6053 // CHECK8-NEXT:    store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
6054 // CHECK8-NEXT:    store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
6055 // CHECK8-NEXT:    [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
6056 // CHECK8-NEXT:    [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
6057 // CHECK8-NEXT:    [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
6058 // CHECK8-NEXT:    [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
6059 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
6060 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
6061 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
6062 // CHECK8-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
6063 // CHECK8-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
6064 // CHECK8:       omp.arrayinit.body:
6065 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
6066 // CHECK8-NEXT:    store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
6067 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6068 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
6069 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
6070 // CHECK8:       omp.arrayinit.done:
6071 // CHECK8-NEXT:    [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
6072 // CHECK8-NEXT:    [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
6073 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
6074 // CHECK8-NEXT:    [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
6075 // CHECK8-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 4
6076 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6077 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
6078 // CHECK8-NEXT:    [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
6079 // CHECK8-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
6080 // CHECK8-NEXT:    switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
6081 // CHECK8-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
6082 // CHECK8-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
6083 // CHECK8-NEXT:    ]
6084 // CHECK8:       .omp.reduction.case1:
6085 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
6086 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
6087 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6088 // CHECK8:       omp.arraycpy.body:
6089 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6090 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6091 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
6092 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
6093 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
6094 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
6095 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
6096 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6097 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
6098 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
6099 // CHECK8:       omp.arraycpy.done6:
6100 // CHECK8-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
6101 // CHECK8-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
6102 // CHECK8:       .omp.reduction.case2:
6103 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
6104 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
6105 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
6106 // CHECK8:       omp.arraycpy.body8:
6107 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
6108 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
6109 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
6110 // CHECK8-NEXT:    [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
6111 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
6112 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
6113 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
6114 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
6115 // CHECK8:       omp.arraycpy.done14:
6116 // CHECK8-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
6117 // CHECK8-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
6118 // CHECK8:       .omp.reduction.default:
6119 // CHECK8-NEXT:    ret void
6120 //
6121 //
6122 // CHECK8-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
6123 // CHECK8-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR2]] {
6124 // CHECK8-NEXT:  entry:
6125 // CHECK8-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
6126 // CHECK8-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
6127 // CHECK8-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
6128 // CHECK8-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
6129 // CHECK8-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
6130 // CHECK8-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
6131 // CHECK8-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
6132 // CHECK8-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
6133 // CHECK8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
6134 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
6135 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
6136 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
6137 // CHECK8-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
6138 // CHECK8-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
6139 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
6140 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
6141 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6142 // CHECK8:       omp.arraycpy.body:
6143 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6144 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6145 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
6146 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
6147 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
6148 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
6149 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6150 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6151 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
6152 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
6153 // CHECK8:       omp.arraycpy.done2:
6154 // CHECK8-NEXT:    ret void
6155 //
6156