// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32

// RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32

// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY

// RUN: %clang_cc1  -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1  -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY
// SIMD-ONLY-NOT: {{__kmpc|__tgt}}

// expected-no-diagnostics
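//
// The test below exercises host code generation for the 'lastprivate' clause
// on '#pragma omp target teams distribute parallel for': the outlined teams
// and parallel functions are expected to allocate private copies of the
// listed variables and, when the "is last iteration" flag is set, copy the
// private values back to the original variables.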
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  operator T() { return T(); }
  ~S() {}
};

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> &var = test;
  #pragma omp target teams distribute parallel for lastprivate(t_var, vec, s_arr, s_arr, var, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

int main() {
  static int svar;
  volatile double g;
  volatile double &g1 = g;

  #ifdef LAMBDA
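  // In the LAMBDA build the clause is used inside a lambda on a volatile
  // double (g), a reference to it (g1), a function-local static int (svar)
  // and a lambda-local static float (sfvar); the copy-back is expected in
  // both the teams-level and the parallel-level outlined functions.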
  // LAMBDA-LABEL: @main
  // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
  [&]() {
    static float sfvar;
    // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
    // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}},
    // LAMBDA: call void [[OFFLOADING_FUN:@.+]](

    // LAMBDA: define{{.+}} void [[OFFLOADING_FUN]](
    // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED:@.+]] to {{.+}})
    #pragma omp target teams distribute parallel for lastprivate(g, g1, svar, sfvar)
    for (int i = 0; i < 2; ++i) {
      // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* {{.+}}, i32* {{.+}}, {{.+}} [[G1_IN:%.+]], {{.+}} [[SVAR_IN:%.+]], {{.+}} [[SFVAR_IN:%.+]], {{.+}} [[G_IN:%.+]])
      // skip gbl and bound tid
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: [[G1_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SFVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G1_REF:%.+]] = alloca double*,
      // loop variables
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

      // LAMBDA-DAG: store {{.+}} [[G_IN]], {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[G1_IN]], {{.+}} [[G1_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SFVAR_IN]], {{.+}} [[SFVAR_ADDR]],

      // LAMBDA-64-DAG: [[G_TGT:%.+]] = bitcast {{.+}} [[G_ADDR]] to
      // LAMBDA-32-DAG: [[G_TGT:%.+]] = load {{.+}}, {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: [[G1_TGT:%.+]] = load {{.+}}, {{.+}} [[G1_REF]],
      // LAMBDA-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to
      // LAMBDA-DAG: [[SFVAR_TGT:%.+]] = bitcast {{.+}} [[SFVAR_ADDR]] to

      g1 = 1;
      svar = 3;
      sfvar = 4.0;
      // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4(
      // LAMBDA: call void {{.*}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} @[[LPAR_OUTL:.+]] to
      // LAMBDA: call {{.*}}void @__kmpc_for_static_fini(

      // LAMBDA: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
      // LAMBDA: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
      // LAMBDA: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

      // LAMBDA: [[OMP_LASTPRIV_BLOCK]]:
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G_TGT]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G1_TGT]],
      // LAMBDA-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
      // LAMBDA-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[SFVAR_TGT]],
      // LAMBDA: br label %[[OMP_LASTPRIV_DONE]]
      // LAMBDA: [[OMP_LASTPRIV_DONE]]:
      // LAMBDA: ret

      // LAMBDA: define{{.*}} internal{{.*}} void @[[LPAR_OUTL]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[G1_IN:%.+]], {{.+}} [[SVAR_IN:%.+]], {{.+}} [[SFVAR_IN:%.+]], {{.+}} [[G_IN:%.+]])
      // skip tid and prev variables
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: [[G1_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SFVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G1_REF:%.+]] = alloca double*,
      // loop variables
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

      // LAMBDA-DAG: store {{.+}} [[G_IN]], {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[G1_IN]], {{.+}} [[G1_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SFVAR_IN]], {{.+}} [[SFVAR_ADDR]],

      // LAMBDA-64-DAG: [[G_TGT:%.+]] = bitcast {{.+}} [[G_ADDR]] to
      // LAMBDA-32-DAG: [[G_TGT:%.+]] = load {{.+}}, {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: [[G1_TGT:%.+]] = load {{.+}}, {{.+}} [[G1_REF]],
      // LAMBDA-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to
      // LAMBDA-DAG: [[SFVAR_TGT:%.+]] = bitcast {{.+}} [[SFVAR_ADDR]] to


      // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4(
      // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](
      // LAMBDA: call {{.*}}void @__kmpc_for_static_fini(

      // LAMBDA: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
      // LAMBDA: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
      // LAMBDA: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

      // LAMBDA: [[OMP_LASTPRIV_BLOCK]]:
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G_TGT]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G1_TGT]],
      // LAMBDA-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
      // LAMBDA-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[SFVAR_TGT]],
      // LAMBDA: br label %[[OMP_LASTPRIV_DONE]]
      // LAMBDA: [[OMP_LASTPRIV_DONE]]:
      // LAMBDA: ret

      [&]() {
        // LAMBDA: define {{.+}} void [[INNER_LAMBDA]]({{.+}} [[ARG_PTR:%.+]])
        // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
        g = 2;
        g1 = 2;
        svar = 4;
        sfvar = 8.0;
        // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
        // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
        // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
        // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]

        // LAMBDA: [[TMP_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
        // LAMBDA: [[G1_REF:%.+]] = load double*, double** [[TMP_PTR_REF]]
        // LAMBDA: store double 2.0{{.+}}, double* [[G1_REF]],
        // LAMBDA: [[SVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
        // LAMBDA: [[SVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PTR_REF]]
        // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SVAR_REF]]
        // LAMBDA: [[SFVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 3
        // LAMBDA: [[SFVAR_REF:%.+]] = load float*, float** [[SFVAR_PTR_REF]]
        // LAMBDA: store float 8.0{{.+}}, float* [[SFVAR_REF]]
      }();
    }
  }();
  return 0;
  #else
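  // In the non-LAMBDA build the clause is used on an int (t_var), an int
  // array (vec), an array of S<float> (s_arr), a reference to S<float> (var)
  // and a static int (svar); the corresponding copy-back sequences are
  // checked below for both main and the tmain<int> instantiation.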
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> &var = test;

  #pragma omp target teams distribute parallel for lastprivate(t_var, vec, s_arr, s_arr, var, var, svar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  int i;

  return tmain<int>();
  #endif
}

// CHECK: define{{.*}} i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* {{[^,]*}} [[TEST]])
// CHECK: call i{{[0-9]+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}},
// CHECK: call void [[OFFLOAD_FUN:@.+]](
// CHECK: ret

// CHECK: define{{.+}} [[OFFLOAD_FUN]](
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(
// CHECK: ret
//
// CHECK: define internal void [[OMP_OUTLINED:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN:%.+]], [2 x [[S_FLOAT_TY]]]*{{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]*{{.+}} [[VAR_IN:%.+]], i{{[0-9]+}}{{.*}} [[S_VAR_IN:%.+]])
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[TMP_VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]],
// CHECK: store {{.+}} [[T_VAR_IN]], {{.+}} [[T_VAR_ADDR]],
// CHECK: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]],
// CHECK: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]],
// CHECK: store {{.+}} [[S_VAR_IN]], {{.+}} [[SVAR_ADDR]],

// prepare lastprivate targets
// CHECK-64-DAG: [[TVAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR]],
// CHECK-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to

// the distribute loop
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: call void {{.*}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} @[[PAR_OUTL:.+]] to
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[TVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR]],
// CHECK-DAG: [[VEC_TGT_REF:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_REF]],
// CHECK-DAG: [[S_ARR_BEGIN:%.+]] = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_TGT_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_TGT_BCAST]],
// CHECK-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
// CHECK: ret void

// CHECK: define internal void [[OMP_OUTLINED:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN:%.+]], [2 x [[S_FLOAT_TY]]]*{{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]*{{.+}} [[VAR_IN:%.+]], i{{[0-9]+}}{{.*}} [[S_VAR_IN:%.+]])

// gbl and bound tid vars, prev lb and ub vars
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},

// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[TMP_VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]],
// CHECK: store {{.+}} [[T_VAR_IN]], {{.+}} [[T_VAR_ADDR]],
// CHECK: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]],
// CHECK: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]],
// CHECK: store {{.+}} [[S_VAR_IN]], {{.+}} [[SVAR_ADDR]],

// prepare lastprivate targets
// CHECK-64-DAG: [[TVAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR]],
// CHECK-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to

// the distribute loop
// CHECK: call void @__kmpc_for_static_init_4(
// skip body: code generation routine is same as distribute parallel for lastprivate
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[TVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR]],
// CHECK-DAG: [[VEC_TGT_REF:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_REF]],
// CHECK-DAG: [[S_ARR_BEGIN:%.+]] = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_TGT_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_TGT_BCAST]],
// CHECK-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
// CHECK: ret void

// template tmain
// CHECK: define{{.*}} i{{[0-9]+}} [[TMAIN_INT:@.+]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* {{[^,]*}} [[TEST]])
// CHECK: call i{{[0-9]+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}},
// CHECK: call void [[OFFLOAD_FUN_1:@.+]](
// CHECK: ret

// CHECK: define internal void [[OFFLOAD_FUN_1]](
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4,
// CHECK: ret

// CHECK: define internal void [[OMP_OUTLINED_1:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR1:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN1:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN1:%.+]], [2 x [[S_INT_TY]]]*{{.+}} [[S_ARR_IN1:%.+]], [[S_INT_TY]]*{{.+}} [[VAR_IN1:%.+]])
// skip alloca of global_tid and bound_tid
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: [[VEC_ADDR1:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR1:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR1:%.+]] = alloca [2 x [[S_INT_TY]]]*,
// CHECK: [[VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: [[TMP_VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST1:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN1]], {{.+}} [[VEC_ADDR1]],
// CHECK: store {{.+}} [[T_VAR_IN1]], {{.+}} [[T_VAR_ADDR1]],
// CHECK: store {{.+}} [[S_ARR_IN1]], {{.+}} [[S_ARR_ADDR1]],
// CHECK: store {{.+}} [[VAR_IN1]], {{.+}} [[VAR_ADDR1]],

// prepare lastprivate targets
// CHECK-64-DAG: [[T_VAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR1]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR1]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR1]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR1]],

// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: call void {{.*}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} @[[TPAR_OUTL:.+]] to
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST1]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[T_VAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR1]],
// CHECK-DAG: [[VEC_TGT_BCAST:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_BCAST]],
// CHECK-DAG: {{.+}} = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_ADDR_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_ADDR_BCAST]],
// CHECK: ret void

// CHECK: define internal void [[TPAR_OUTL:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR1:%.+]], i{{[0-9]+}}* noalias %{{.+}}, {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN1:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN1:%.+]], [2 x [[S_INT_TY]]]*{{.+}} [[S_ARR_IN1:%.+]], [[S_INT_TY]]*{{.+}} [[VAR_IN1:%.+]])
// skip alloca of global_tid and bound_tid, and prev lb and ub vars
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},

// CHECK: [[VEC_ADDR1:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR1:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR1:%.+]] = alloca [2 x [[S_INT_TY]]]*,
// CHECK: [[VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: [[TMP_VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST1:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN1]], {{.+}} [[VEC_ADDR1]],
// CHECK: store {{.+}} [[T_VAR_IN1]], {{.+}} [[T_VAR_ADDR1]],
// CHECK: store {{.+}} [[S_ARR_IN1]], {{.+}} [[S_ARR_ADDR1]],
// CHECK: store {{.+}} [[VAR_IN1]], {{.+}} [[VAR_ADDR1]],

// prepare lastprivate targets
// CHECK-64-DAG: [[T_VAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR1]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR1]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR1]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR1]],

// CHECK: call void @__kmpc_for_static_init_4(
// skip body: code generation routine is same as distribute parallel for lastprivate
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST1]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[T_VAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR1]],
// CHECK-DAG: [[VEC_TGT_BCAST:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_BCAST]],
// CHECK-DAG: {{.+}} = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_ADDR_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_ADDR_BCAST]],
// CHECK: ret void

#endif