1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -verify -fopenmp -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
4 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
5 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
7 // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -gno-column-info -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
8 
9 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
10 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
11 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
12 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
14 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
15 // expected-no-diagnostics
16 #ifndef HEADER
17 #define HEADER
18 
19 
20 template <class T>
21 void foo(T argc) {}
22 
23 template <typename T>
24 int tmain(T argc) {
25   typedef double (*chunk_t)[argc[0][0]];
26 #pragma omp parallel
27   {
28   foo(argc);
  chunk_t var; (void)var[0][0];
30   }
31   return 0;
32 }
33 
34 int global;
int main(int argc, char **argv) {
36   int a[argc];
37 #pragma omp parallel shared(global, a) default(none)
38   foo(a[1]), a[1] = global;
39 #ifndef IRBUILDER
// TODO: Add support for private variables in the OpenMPIRBuilder.
41 #pragma omp parallel private(global, a) default(none)
42 #pragma omp parallel shared(global, a) default(none)
43   foo(a[1]), a[1] = global;
// FIXME: The OpenMPIRBuilder crashes in llvm::OpenMPIRBuilder::finalize() with:
// Assertion `Extractor.isEligible() && "Expected OpenMP outlining to be possible!"' failed.
46 #pragma omp parallel shared(global, a) default(none)
47 #pragma omp parallel shared(global, a) default(none)
48   foo(a[1]), a[1] = global;
49 #endif // IRBUILDER
50   return tmain(argv);
51 }
52 
53 
54 
55 
56 
57 
58 
59 
60 
61 
62 
63 
// Note that the OpenMPIRBuilder passes the trailing arguments in a different
// order: arguments that are wrapped into additional pointers precede the
// arguments passed by value. This is expected and not problematic because both
// the call and the outlined function are generated from the same place, and
// the outlined function has internal linkage.
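//
// For illustration only (hypothetical outlined-function signatures, not
// FileCheck assertions), a region capturing a VLA bound and an array might be
// outlined by Clang's own codegen roughly as
//   void @outlined(i32* %.global_tid., i32* %.bound_tid., i64 %vla, i32* %a)
// whereas the OpenMPIRBuilder would emit the pointer-wrapped capture first:
//   void @outlined(i32* %.global_tid., i32* %.bound_tid., i32* %a, i64 %vla)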
68 
69 
70 
71 
72 #endif
73 // CHECK1-LABEL: define {{[^@]+}}@main
74 // CHECK1-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
75 // CHECK1-NEXT:  entry:
76 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
77 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
78 // CHECK1-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
79 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
80 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
81 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
82 // CHECK1-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
83 // CHECK1-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
84 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
85 // CHECK1-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
86 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
87 // CHECK1-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
88 // CHECK1-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16
89 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
90 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]])
91 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
92 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]])
93 // CHECK1-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
94 // CHECK1-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]])
95 // CHECK1-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
96 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
97 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP4]])
98 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4
99 // CHECK1-NEXT:    ret i32 [[TMP5]]
100 //
101 //
102 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
103 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
104 // CHECK1-NEXT:  entry:
105 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
106 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
107 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
108 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
109 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
110 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
111 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
112 // CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
113 // CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
114 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
115 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
116 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
117 // CHECK1-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP2]])
118 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
119 // CHECK1:       invoke.cont:
120 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* @global, align 4
121 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
122 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4
123 // CHECK1-NEXT:    ret void
124 // CHECK1:       terminate.lpad:
125 // CHECK1-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
126 // CHECK1-NEXT:    catch i8* null
127 // CHECK1-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
128 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR6:[0-9]+]]
129 // CHECK1-NEXT:    unreachable
130 //
131 //
132 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
133 // CHECK1-SAME: (i32 [[ARGC:%.*]]) #[[ATTR3:[0-9]+]] comdat {
134 // CHECK1-NEXT:  entry:
135 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
136 // CHECK1-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
137 // CHECK1-NEXT:    ret void
138 //
139 //
140 // CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
141 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat {
142 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]]
143 // CHECK1-NEXT:    call void @_ZSt9terminatev() #[[ATTR6]]
144 // CHECK1-NEXT:    unreachable
145 //
146 //
147 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
148 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]]) #[[ATTR2]] {
149 // CHECK1-NEXT:  entry:
150 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
151 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
152 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
153 // CHECK1-NEXT:    [[GLOBAL:%.*]] = alloca i32, align 4
154 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
155 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
156 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
157 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
158 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
159 // CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
160 // CHECK1-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
161 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 8
162 // CHECK1-NEXT:    [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16
163 // CHECK1-NEXT:    store i64 [[TMP0]], i64* [[__VLA_EXPR0]], align 8
164 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[VLA1]], i32* [[GLOBAL]])
165 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
166 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP2]])
167 // CHECK1-NEXT:    ret void
168 //
169 //
170 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
171 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
172 // CHECK1-NEXT:  entry:
173 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
174 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
175 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
176 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
177 // CHECK1-NEXT:    [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8
178 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
179 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
180 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
181 // CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
182 // CHECK1-NEXT:    store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8
183 // CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
184 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
185 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8
186 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
187 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
188 // CHECK1-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP3]])
189 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
190 // CHECK1:       invoke.cont:
191 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
192 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
193 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4
194 // CHECK1-NEXT:    ret void
195 // CHECK1:       terminate.lpad:
196 // CHECK1-NEXT:    [[TMP5:%.*]] = landingpad { i8*, i32 }
197 // CHECK1-NEXT:    catch i8* null
198 // CHECK1-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0
199 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR6]]
200 // CHECK1-NEXT:    unreachable
201 //
202 //
203 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
204 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
205 // CHECK1-NEXT:  entry:
206 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
207 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
208 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
209 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
210 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
211 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
212 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
213 // CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
214 // CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
215 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
216 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[TMP1]])
217 // CHECK1-NEXT:    ret void
218 //
219 //
220 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
221 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
222 // CHECK1-NEXT:  entry:
223 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
224 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
225 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
226 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
227 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
228 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
229 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
230 // CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
231 // CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
232 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
233 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
234 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
235 // CHECK1-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP2]])
236 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
237 // CHECK1:       invoke.cont:
238 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* @global, align 4
239 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
240 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4
241 // CHECK1-NEXT:    ret void
242 // CHECK1:       terminate.lpad:
243 // CHECK1-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
244 // CHECK1-NEXT:    catch i8* null
245 // CHECK1-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
246 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR6]]
247 // CHECK1-NEXT:    unreachable
248 //
249 //
250 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
251 // CHECK1-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat {
252 // CHECK1-NEXT:  entry:
253 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
254 // CHECK1-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
255 // CHECK1-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8
256 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0
257 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
258 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0
259 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1
260 // CHECK1-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64
261 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]], i64 [[TMP3]])
262 // CHECK1-NEXT:    ret i32 0
263 //
264 //
265 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
266 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i8*** nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 [[VLA:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
267 // CHECK1-NEXT:  entry:
268 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
269 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
270 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8***, align 8
271 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
272 // CHECK1-NEXT:    [[VAR:%.*]] = alloca double*, align 8
273 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
274 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
275 // CHECK1-NEXT:    store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
276 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
277 // CHECK1-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8
278 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
279 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[TMP0]], align 8
280 // CHECK1-NEXT:    invoke void @_Z3fooIPPcEvT_(i8** [[TMP2]])
281 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
282 // CHECK1:       invoke.cont:
283 // CHECK1-NEXT:    [[TMP3:%.*]] = load double*, double** [[VAR]], align 8
284 // CHECK1-NEXT:    [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]]
285 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]]
286 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0
287 // CHECK1-NEXT:    ret void
288 // CHECK1:       terminate.lpad:
289 // CHECK1-NEXT:    [[TMP5:%.*]] = landingpad { i8*, i32 }
290 // CHECK1-NEXT:    catch i8* null
291 // CHECK1-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0
292 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR6]]
293 // CHECK1-NEXT:    unreachable
294 //
295 //
296 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
297 // CHECK1-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat {
298 // CHECK1-NEXT:  entry:
299 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
300 // CHECK1-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
301 // CHECK1-NEXT:    ret void
302 //
303 //
304 // CHECK2-LABEL: define {{[^@]+}}@main
305 // CHECK2-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] !dbg [[DBG11:![0-9]+]] {
306 // CHECK2-NEXT:  entry:
307 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
308 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
309 // CHECK2-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
310 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
311 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
312 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
313 // CHECK2-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
314 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]]
315 // CHECK2-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
316 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]]
317 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG21:![0-9]+]]
318 // CHECK2-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG22:![0-9]+]]
319 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG22]]
320 // CHECK2-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG22]]
321 // CHECK2-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG22]]
322 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG22]]
323 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META23:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]]
324 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30:![0-9]+]]
325 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]), !dbg [[DBG31:![0-9]+]]
326 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]]), !dbg [[DBG32:![0-9]+]]
327 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB9:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]), !dbg [[DBG33:![0-9]+]]
328 // CHECK2-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG34:![0-9]+]]
329 // CHECK2-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]]), !dbg [[DBG35:![0-9]+]]
330 // CHECK2-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG36:![0-9]+]]
331 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG37:![0-9]+]]
332 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP4]]), !dbg [[DBG37]]
333 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG37]]
334 // CHECK2-NEXT:    ret i32 [[TMP5]], !dbg [[DBG37]]
335 //
336 //
337 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__
338 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG38:![0-9]+]] {
339 // CHECK2-NEXT:  entry:
340 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
341 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
342 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
343 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
344 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
345 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META46:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47:![0-9]+]]
346 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
347 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META48:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47]]
348 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
349 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47]]
350 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
351 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META50:![0-9]+]], metadata !DIExpression()), !dbg [[DBG51:![0-9]+]]
352 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG52:![0-9]+]]
353 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG52]]
354 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG53:![0-9]+]]
355 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG53]]
356 // CHECK2-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP2]])
357 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG52]]
358 // CHECK2:       invoke.cont:
359 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG54:![0-9]+]]
360 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG55:![0-9]+]]
361 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG56:![0-9]+]]
362 // CHECK2-NEXT:    ret void, !dbg [[DBG54]]
363 // CHECK2:       terminate.lpad:
364 // CHECK2-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
365 // CHECK2-NEXT:    catch i8* null, !dbg [[DBG52]]
366 // CHECK2-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG52]]
367 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR7:[0-9]+]], !dbg [[DBG52]]
368 // CHECK2-NEXT:    unreachable, !dbg [[DBG52]]
369 //
370 //
371 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
372 // CHECK2-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat !dbg [[DBG57:![0-9]+]] {
373 // CHECK2-NEXT:  entry:
374 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
375 // CHECK2-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
376 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META62:![0-9]+]], metadata !DIExpression()), !dbg [[DBG63:![0-9]+]]
377 // CHECK2-NEXT:    ret void, !dbg [[DBG64:![0-9]+]]
378 //
379 //
380 // CHECK2-LABEL: define {{[^@]+}}@__clang_call_terminate
381 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
382 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR6:[0-9]+]]
383 // CHECK2-NEXT:    call void @_ZSt9terminatev() #[[ATTR7]]
384 // CHECK2-NEXT:    unreachable
385 //
386 //
387 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
388 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG65:![0-9]+]] {
389 // CHECK2-NEXT:  entry:
390 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
391 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
392 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
393 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
394 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
395 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67:![0-9]+]]
396 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
397 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67]]
398 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
399 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META69:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67]]
400 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
401 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META70:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67]]
402 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG71:![0-9]+]]
403 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG71]]
404 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG71]]
405 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG71]]
406 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG71]]
407 // CHECK2-NEXT:    call void @.omp_outlined._debug__(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG71]]
408 // CHECK2-NEXT:    ret void, !dbg [[DBG71]]
409 //
410 //
411 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.1
412 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG74:![0-9]+]] {
413 // CHECK2-NEXT:  entry:
414 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
415 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
416 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
417 // CHECK2-NEXT:    [[GLOBAL:%.*]] = alloca i32, align 4
418 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
419 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
420 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
421 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META77:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78:![0-9]+]]
422 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
423 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META79:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
424 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
425 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META80:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
426 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG81:![0-9]+]]
427 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[GLOBAL]], metadata [[META82:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
428 // CHECK2-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG81]]
429 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG81]]
430 // CHECK2-NEXT:    [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16, !dbg [[DBG81]]
431 // CHECK2-NEXT:    store i64 [[TMP0]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG81]]
432 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
433 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[VLA1]], metadata [[META84:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
434 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[VLA1]], i32* [[GLOBAL]]), !dbg [[DBG81]]
435 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG85:![0-9]+]]
436 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP2]]), !dbg [[DBG85]]
437 // CHECK2-NEXT:    ret void, !dbg [[DBG87:![0-9]+]]
438 //
439 //
440 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.2
441 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG88:![0-9]+]] {
442 // CHECK2-NEXT:  entry:
443 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
444 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
445 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
446 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
447 // CHECK2-NEXT:    [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8
448 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
449 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG92:![0-9]+]]
450 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
451 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META93:![0-9]+]], metadata !DIExpression()), !dbg [[DBG92]]
452 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
453 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META94:![0-9]+]], metadata !DIExpression()), !dbg [[DBG92]]
454 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
455 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META95:![0-9]+]], metadata !DIExpression()), !dbg [[DBG96:![0-9]+]]
456 // CHECK2-NEXT:    store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8
457 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[GLOBAL_ADDR]], metadata [[META97:![0-9]+]], metadata !DIExpression()), !dbg [[DBG98:![0-9]+]]
458 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG99:![0-9]+]]
459 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG99]]
460 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG99]]
461 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG100:![0-9]+]]
462 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG100]]
463 // CHECK2-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP3]])
464 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG99]]
465 // CHECK2:       invoke.cont:
466 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG101:![0-9]+]]
467 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG102:![0-9]+]]
468 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG103:![0-9]+]]
469 // CHECK2-NEXT:    ret void, !dbg [[DBG101]]
470 // CHECK2:       terminate.lpad:
471 // CHECK2-NEXT:    [[TMP5:%.*]] = landingpad { i8*, i32 }
472 // CHECK2-NEXT:    catch i8* null, !dbg [[DBG99]]
473 // CHECK2-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0, !dbg [[DBG99]]
474 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR7]], !dbg [[DBG99]]
475 // CHECK2-NEXT:    unreachable, !dbg [[DBG99]]
476 //
477 //
478 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
479 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR3]] !dbg [[DBG104:![0-9]+]] {
480 // CHECK2-NEXT:  entry:
481 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
482 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
483 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
484 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
485 // CHECK2-NEXT:    [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8
486 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
487 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META105:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106:![0-9]+]]
488 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
489 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META107:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]]
490 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
491 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META108:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]]
492 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
493 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META109:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]]
494 // CHECK2-NEXT:    store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8
495 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[GLOBAL_ADDR]], metadata [[META110:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]]
496 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG111:![0-9]+]]
497 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG111]]
498 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG111]]
499 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG111]]
500 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG111]]
501 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG111]]
502 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG111]]
503 // CHECK2-NEXT:    call void @.omp_outlined._debug__.2(i32* [[TMP3]], i32* [[TMP4]], i64 [[TMP0]], i32* [[TMP5]], i32* [[TMP6]]) #[[ATTR6]], !dbg [[DBG111]]
504 // CHECK2-NEXT:    ret void, !dbg [[DBG111]]
505 //
506 //
507 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
508 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG112:![0-9]+]] {
509 // CHECK2-NEXT:  entry:
510 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
511 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
512 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
513 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
514 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META113:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114:![0-9]+]]
515 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
516 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META115:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114]]
517 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
518 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114]]
519 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG117:![0-9]+]]
520 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG117]]
521 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG117]]
522 // CHECK2-NEXT:    call void @.omp_outlined._debug__.1(i32* [[TMP1]], i32* [[TMP2]], i64 [[TMP0]]) #[[ATTR6]], !dbg [[DBG117]]
523 // CHECK2-NEXT:    ret void, !dbg [[DBG117]]
524 //
525 //
526 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.5
527 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG118:![0-9]+]] {
528 // CHECK2-NEXT:  entry:
529 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
530 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
531 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
532 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
533 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
534 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META119:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120:![0-9]+]]
535 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
536 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META121:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120]]
537 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
538 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META122:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120]]
539 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
540 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META123:![0-9]+]], metadata !DIExpression()), !dbg [[DBG124:![0-9]+]]
541 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG125:![0-9]+]]
542 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG125]]
543 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB7:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[TMP1]]), !dbg [[DBG125]]
544 // CHECK2-NEXT:    ret void, !dbg [[DBG126:![0-9]+]]
545 //
546 //
547 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.6
548 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG127:![0-9]+]] {
549 // CHECK2-NEXT:  entry:
550 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
551 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
552 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
553 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
554 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
555 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META128:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129:![0-9]+]]
556 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
557 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META130:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129]]
558 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
559 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129]]
560 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
561 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
562 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG134:![0-9]+]]
563 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG134]]
564 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG135:![0-9]+]]
565 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG135]]
566 // CHECK2-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP2]])
567 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG134]]
568 // CHECK2:       invoke.cont:
569 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG136:![0-9]+]]
570 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG137:![0-9]+]]
571 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG138:![0-9]+]]
572 // CHECK2-NEXT:    ret void, !dbg [[DBG136]]
573 // CHECK2:       terminate.lpad:
574 // CHECK2-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
575 // CHECK2-NEXT:    catch i8* null, !dbg [[DBG134]]
576 // CHECK2-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG134]]
577 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR7]], !dbg [[DBG134]]
578 // CHECK2-NEXT:    unreachable, !dbg [[DBG134]]
579 //
580 //
581 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
582 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG139:![0-9]+]] {
583 // CHECK2-NEXT:  entry:
584 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
585 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
586 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
587 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
588 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
589 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141:![0-9]+]]
590 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
591 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141]]
592 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
593 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141]]
594 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
595 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141]]
596 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG145:![0-9]+]]
597 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG145]]
598 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG145]]
599 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG145]]
600 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG145]]
601 // CHECK2-NEXT:    call void @.omp_outlined._debug__.6(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG145]]
602 // CHECK2-NEXT:    ret void, !dbg [[DBG145]]
603 //
604 //
605 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..8
606 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG146:![0-9]+]] {
607 // CHECK2-NEXT:  entry:
608 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
609 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
610 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
611 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
612 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
613 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META147:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148:![0-9]+]]
614 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
615 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META149:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148]]
616 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
617 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148]]
618 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
619 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META151:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148]]
620 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG152:![0-9]+]]
621 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG152]]
622 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG152]]
623 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG152]]
624 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG152]]
625 // CHECK2-NEXT:    call void @.omp_outlined._debug__.5(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG152]]
626 // CHECK2-NEXT:    ret void, !dbg [[DBG152]]
627 //
628 //
629 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
630 // CHECK2-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG153:![0-9]+]] {
631 // CHECK2-NEXT:  entry:
632 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
633 // CHECK2-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
634 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]]
635 // CHECK2-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG160:![0-9]+]]
636 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG160]]
637 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG160]]
638 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG160]]
639 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG160]]
640 // CHECK2-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG161:![0-9]+]]
641 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB11:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]], i64 [[TMP3]]), !dbg [[DBG162:![0-9]+]]
642 // CHECK2-NEXT:    ret i32 0, !dbg [[DBG163:![0-9]+]]
643 //
644 //
645 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.9
646 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i8*** nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG164:![0-9]+]] {
647 // CHECK2-NEXT:  entry:
648 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
649 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
650 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8***, align 8
651 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
652 // CHECK2-NEXT:    [[VAR:%.*]] = alloca double*, align 8
653 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
654 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META168:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169:![0-9]+]]
655 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
656 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169]]
657 // CHECK2-NEXT:    store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
658 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8**** [[ARGC_ADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
659 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
660 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169]]
661 // CHECK2-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG174:![0-9]+]]
662 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG174]]
663 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[TMP0]], align 8, !dbg [[DBG175:![0-9]+]]
664 // CHECK2-NEXT:    invoke void @_Z3fooIPPcEvT_(i8** [[TMP2]])
665 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG177:![0-9]+]]
666 // CHECK2:       invoke.cont:
// CHECK2-NEXT:    call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG185:![0-9]+]]
// CHECK2-NEXT:    [[TMP3:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG186:![0-9]+]]
// CHECK2-NEXT:    [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]], !dbg [[DBG186]]
// CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]], !dbg [[DBG186]]
// CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0, !dbg [[DBG186]]
// CHECK2-NEXT:    ret void, !dbg [[DBG187:![0-9]+]]
// CHECK2:       terminate.lpad:
// CHECK2-NEXT:    [[TMP5:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT:    catch i8* null, !dbg [[DBG177]]
// CHECK2-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0, !dbg [[DBG177]]
// CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR7]], !dbg [[DBG177]]
// CHECK2-NEXT:    unreachable, !dbg [[DBG177]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
// CHECK2-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG188:![0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK2-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META191:![0-9]+]], metadata !DIExpression()), !dbg [[DBG192:![0-9]+]]
// CHECK2-NEXT:    ret void, !dbg [[DBG193:![0-9]+]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i8*** nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG194:![0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8***, align 8
// CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META195:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196:![0-9]+]]
// CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196]]
// CHECK2-NEXT:    store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
// CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8**** [[ARGC_ADDR]], metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196]]
// CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196]]
// CHECK2-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG200:![0-9]+]]
// CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG200]]
// CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG200]]
// CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG200]]
// CHECK2-NEXT:    [[TMP4:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG200]]
// CHECK2-NEXT:    call void @.omp_outlined._debug__.9(i32* [[TMP2]], i32* [[TMP3]], i8*** [[TMP4]], i64 [[TMP1]]) #[[ATTR6]], !dbg [[DBG200]]
// CHECK2-NEXT:    ret void, !dbg [[DBG200]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK3-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK3-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK3-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16
// CHECK3-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK3-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK3-NEXT:    br label [[OMP_PARALLEL:%.*]]
// CHECK3:       omp_parallel:
// CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @main..omp_par to void (i32*, i32*, ...)*), i32* [[VLA]])
// CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK3:       omp.par.outlined.exit:
// CHECK3-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK3:       omp.par.exit.split:
// CHECK3-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK3-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]])
// CHECK3-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT:    call void @llvm.stackrestore(i8* [[TMP4]])
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK3-NEXT:    ret i32 [[TMP5]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@main..omp_par
// CHECK3-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i32* [[VLA:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT:  omp.par.entry:
// CHECK3-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
// CHECK3-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
// CHECK3-NEXT:    br label [[OMP_PAR_REGION:%.*]]
// CHECK3:       omp.par.outlined.exit.exitStub:
// CHECK3-NEXT:    ret void
// CHECK3:       omp.par.region:
// CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT:    call void @_Z3fooIiEvT_(i32 [[TMP1]])
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* @global, align 4
// CHECK3-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1
// CHECK3-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX1]], align 4
// CHECK3-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]]
// CHECK3:       omp.par.pre_finalize:
// CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
// CHECK3-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK3-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
// CHECK3-SAME: (i8** [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTRELOADED:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK3-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK3-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8
// CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0
// CHECK3-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK3-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0
// CHECK3-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1
// CHECK3-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64
// CHECK3-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT:    store i64 [[TMP3]], i64* [[DOTRELOADED]], align 8
// CHECK3-NEXT:    br label [[OMP_PARALLEL:%.*]]
// CHECK3:       omp_parallel:
// CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i8***)* @_Z5tmainIPPcEiT_..omp_par to void (i32*, i32*, ...)*), i64* [[DOTRELOADED]], i8*** [[ARGC_ADDR]])
// CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK3:       omp.par.outlined.exit:
// CHECK3-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK3:       omp.par.exit.split:
// CHECK3-NEXT:    ret i32 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_..omp_par
// CHECK3-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i64* [[DOTRELOADED:%.*]], i8*** [[ARGC_ADDR:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT:  omp.par.entry:
// CHECK3-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
// CHECK3-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTRELOADED]], align 8
// CHECK3-NEXT:    [[VAR:%.*]] = alloca double*, align 8
// CHECK3-NEXT:    br label [[OMP_PAR_REGION:%.*]]
// CHECK3:       omp.par.outlined.exit.exitStub:
// CHECK3-NEXT:    ret void
// CHECK3:       omp.par.region:
// CHECK3-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8
// CHECK3-NEXT:    call void @_Z3fooIPPcEvT_(i8** [[TMP2]])
// CHECK3-NEXT:    [[TMP3:%.*]] = load double*, double** [[VAR]], align 8
// CHECK3-NEXT:    [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]]
// CHECK3-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]]
// CHECK3-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0
// CHECK3-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]]
// CHECK3:       omp.par.pre_finalize:
// CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
// CHECK3-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK3-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK3-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@main
// CHECK4-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] !dbg [[DBG11:![0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK4-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK4-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]]
// CHECK4-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18]]
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG20:![0-9]+]]
// CHECK4-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG20]]
// CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG20]]
// CHECK4-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG20]]
// CHECK4-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG20]]
// CHECK4-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG20]]
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META21:![0-9]+]], metadata !DIExpression()), !dbg [[DBG23:![0-9]+]]
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META24:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20]]
// CHECK4-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]), !dbg [[DBG28:![0-9]+]]
// CHECK4-NEXT:    br label [[OMP_PARALLEL:%.*]]
// CHECK4:       omp_parallel:
// CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @main..omp_par to void (i32*, i32*, ...)*), i32* [[VLA]]), !dbg [[DBG29:![0-9]+]]
// CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK4:       omp.par.outlined.exit:
// CHECK4-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK4:       omp.par.exit.split:
// CHECK4-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG30:![0-9]+]]
// CHECK4-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]]), !dbg [[DBG30]]
// CHECK4-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG30]]
// CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG31:![0-9]+]]
// CHECK4-NEXT:    call void @llvm.stackrestore(i8* [[TMP4]]), !dbg [[DBG31]]
// CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG31]]
// CHECK4-NEXT:    ret i32 [[TMP5]], !dbg [[DBG31]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@main..omp_par
// CHECK4-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i32* [[VLA:%.*]]) #[[ATTR1:[0-9]+]] !dbg [[DBG32:![0-9]+]] {
// CHECK4-NEXT:  omp.par.entry:
// CHECK4-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
// CHECK4-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
// CHECK4-NEXT:    br label [[OMP_PAR_REGION:%.*]]
// CHECK4:       omp.par.outlined.exit.exitStub:
// CHECK4-NEXT:    ret void
// CHECK4:       omp.par.region:
// CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG34:![0-9]+]]
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG34]]
// CHECK4-NEXT:    call void @_Z3fooIiEvT_(i32 [[TMP1]]), !dbg [[DBG34]]
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG34]]
// CHECK4-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG34]]
// CHECK4-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG34]]
// CHECK4-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG34]]
// CHECK4:       omp.par.pre_finalize:
// CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG34]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
// CHECK4-SAME: (i32 [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat !dbg [[DBG35:![0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META40:![0-9]+]], metadata !DIExpression()), !dbg [[DBG41:![0-9]+]]
// CHECK4-NEXT:    ret void, !dbg [[DBG41]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
// CHECK4-SAME: (i8** [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat !dbg [[DBG44:![0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[DOTRELOADED:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK4-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50:![0-9]+]]
// CHECK4-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG51:![0-9]+]]
// CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG51]]
// CHECK4-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG51]]
// CHECK4-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG51]]
// CHECK4-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG51]]
// CHECK4-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG51]]
// CHECK4-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]]), !dbg [[DBG52:![0-9]+]]
// CHECK4-NEXT:    store i64 [[TMP3]], i64* [[DOTRELOADED]], align 8
// CHECK4-NEXT:    br label [[OMP_PARALLEL:%.*]]
// CHECK4:       omp_parallel:
// CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i8***)* @_Z5tmainIPPcEiT_..omp_par to void (i32*, i32*, ...)*), i64* [[DOTRELOADED]], i8*** [[ARGC_ADDR]]), !dbg [[DBG53:![0-9]+]]
// CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK4:       omp.par.outlined.exit:
// CHECK4-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK4:       omp.par.exit.split:
// CHECK4-NEXT:    ret i32 0, !dbg [[DBG55:![0-9]+]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_..omp_par
// CHECK4-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i64* [[DOTRELOADED:%.*]], i8*** [[ARGC_ADDR:%.*]]) #[[ATTR1]] !dbg [[DBG56:![0-9]+]] {
// CHECK4-NEXT:  omp.par.entry:
// CHECK4-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
// CHECK4-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTRELOADED]], align 8
// CHECK4-NEXT:    [[VAR:%.*]] = alloca double*, align 8
// CHECK4-NEXT:    br label [[OMP_PAR_REGION:%.*]]
// CHECK4:       omp.par.outlined.exit.exitStub:
// CHECK4-NEXT:    ret void
// CHECK4:       omp.par.region:
// CHECK4-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG57:![0-9]+]]
// CHECK4-NEXT:    call void @_Z3fooIPPcEvT_(i8** [[TMP2]]), !dbg [[DBG57]]
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META58:![0-9]+]], metadata !DIExpression()), !dbg [[DBG65:![0-9]+]]
// CHECK4-NEXT:    [[TMP3:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG65]]
// CHECK4-NEXT:    [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]], !dbg [[DBG65]]
// CHECK4-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]], !dbg [[DBG65]]
// CHECK4-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0, !dbg [[DBG65]]
// CHECK4-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG66:![0-9]+]]
// CHECK4:       omp.par.pre_finalize:
// CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG66]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
// CHECK4-SAME: (i8** [[ARGC:%.*]]) #[[ATTR5]] comdat !dbg [[DBG67:![0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK4-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META70:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71:![0-9]+]]
// CHECK4-NEXT:    ret void, !dbg [[DBG71]]
//
//