// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32

// RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32

// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY

// RUN: %clang_cc1  -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1  -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix SIMD-ONLY
// RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix SIMD-ONLY
// SIMD-ONLY-NOT: {{__kmpc|__tgt}}

// expected-no-diagnostics
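// Test IR generation for the 'lastprivate' clause on the combined
// 'target teams distribute parallel for simd' construct, on 64-bit
// (powerpc64le) and 32-bit (i386) hosts, with and without a PCH, and in
// -fopenmp-simd mode, where no offloading or OpenMP runtime calls may appear.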
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  operator T() { return T(); }
  ~S() {}
};

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
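// tmain instantiates the construct with a class type: t_var, vec, s_arr and
// var (a reference bound to 'test') are listed as lastprivate; s_arr and var
// are each named twice in the clause.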
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> &var = test;
  #pragma omp target teams distribute parallel for simd lastprivate(t_var, vec, s_arr, s_arr, var, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

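// main uses the construct either inside a lambda with
// lastprivate(g, g1, svar, sfvar) (-DLAMBDA) or, otherwise, with the S<float>
// variables plus the static 'svar', mirroring tmain.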
int main() {
  static int svar;
  volatile double g;
  volatile double &g1 = g;

  #ifdef LAMBDA
  // LAMBDA-LABEL: @main
  // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
  [&]() {
    static float sfvar;
    // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
    // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams(
    // LAMBDA: call void [[OFFLOADING_FUN:@.+]](

    // LAMBDA: define{{.+}} void [[OFFLOADING_FUN]](
    // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED:@.+]] to {{.+}})
    #pragma omp target teams distribute parallel for simd lastprivate(g, g1, svar, sfvar)
    for (int i = 0; i < 2; ++i) {
      // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* {{.+}}, i32* {{.+}}, {{.+}} [[G1_IN:%.+]], {{.+}} [[SVAR_IN:%.+]], {{.+}} [[SFVAR_IN:%.+]], {{.+}} [[G_IN:%.+]])
      // skip gbl and bound tid
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: [[G1_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SFVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G1_REF:%.+]] = alloca double*,
      // loop variables
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

      // LAMBDA-DAG: store {{.+}} [[G_IN]], {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[G1_IN]], {{.+}} [[G1_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SFVAR_IN]], {{.+}} [[SFVAR_ADDR]],

      // LAMBDA-64-DAG: [[G_TGT:%.+]] = bitcast {{.+}} [[G_ADDR]] to
      // LAMBDA-32-DAG: [[G_TGT:%.+]] = load {{.+}}, {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: [[G1_TGT:%.+]] = load {{.+}}, {{.+}} [[G1_REF]],
      // LAMBDA-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to
      // LAMBDA-DAG: [[SFVAR_TGT:%.+]] = bitcast {{.+}} [[SFVAR_ADDR]] to

      g1 = 1;
      svar = 3;
      sfvar = 4.0;
      // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4(
      // LAMBDA: call void {{.*}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} @[[LPAR_OUTL:.+]] to
      // LAMBDA: call {{.*}}void @__kmpc_for_static_fini(

      // LAMBDA: store i32 2, i32* %
      // LAMBDA: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
      // LAMBDA: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
      // LAMBDA: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

      // LAMBDA: [[OMP_LASTPRIV_BLOCK]]:
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G_TGT]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G1_TGT]],
      // LAMBDA-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
      // LAMBDA-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[SFVAR_TGT]],
      // LAMBDA: br label %[[OMP_LASTPRIV_DONE]]
      // LAMBDA: [[OMP_LASTPRIV_DONE]]:
      // LAMBDA: ret

      // LAMBDA: define{{.*}} internal{{.*}} void @[[LPAR_OUTL]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[G1_IN:%.+]], {{.+}} [[SVAR_IN:%.+]], {{.+}} [[SFVAR_IN:%.+]], {{.+}} [[G_IN:%.+]])
      // skip tid and prev variables
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: alloca
      // LAMBDA: [[G1_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[SFVAR_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G_ADDR:%.+]] = alloca {{.+}},
      // LAMBDA: [[G1_REF:%.+]] = alloca double*,
      // loop variables
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

      // LAMBDA-DAG: store {{.+}} [[G_IN]], {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[G1_IN]], {{.+}} [[G1_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}} [[SFVAR_IN]], {{.+}} [[SFVAR_ADDR]],

      // LAMBDA-64-DAG: [[G_TGT:%.+]] = bitcast {{.+}} [[G_ADDR]] to
      // LAMBDA-32-DAG: [[G_TGT:%.+]] = load {{.+}}, {{.+}} [[G_ADDR]],
      // LAMBDA-DAG: [[G1_TGT:%.+]] = load {{.+}}, {{.+}} [[G1_REF]],
      // LAMBDA-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to
      // LAMBDA-DAG: [[SFVAR_TGT:%.+]] = bitcast {{.+}} [[SFVAR_ADDR]] to


      // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4(
      // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](
      // LAMBDA: call {{.*}}void @__kmpc_for_static_fini(

      // LAMBDA: store i32 2, i32* %
      // LAMBDA: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
      // LAMBDA: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
      // LAMBDA: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

      // LAMBDA: [[OMP_LASTPRIV_BLOCK]]:
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G_TGT]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[G1_TGT]],
      // LAMBDA-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
      // LAMBDA-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
      // LAMBDA-DAG: store {{.+}}, {{.+}} [[SFVAR_TGT]],
      // LAMBDA: br label %[[OMP_LASTPRIV_DONE]]
      // LAMBDA: [[OMP_LASTPRIV_DONE]]:
      // LAMBDA: ret

      [&]() {
        // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
        // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
        g = 2;
        g1 = 2;
        svar = 4;
        sfvar = 8.0;
        // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
        // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
        // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
        // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]

        // LAMBDA: [[TMP_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
        // LAMBDA: [[G1_REF:%.+]] = load double*, double** [[TMP_PTR_REF]]
        // LAMBDA: store double 2.0{{.+}}, double* [[G1_REF]],
        // LAMBDA: [[SVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
        // LAMBDA: [[SVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PTR_REF]]
        // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SVAR_REF]]
        // LAMBDA: [[SFVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 3
        // LAMBDA: [[SFVAR_REF:%.+]] = load float*, float** [[SFVAR_PTR_REF]]
        // LAMBDA: store float 8.0{{.+}}, float* [[SFVAR_REF]]
      }();
    }
  }();
  return 0;
  #else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> &var = test;

  #pragma omp target teams distribute parallel for simd lastprivate(t_var, vec, s_arr, s_arr, var, var, svar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  int i;

  return tmain<int>();
  #endif
}
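// Checks for the non-lambda variant of main: the host stub calls
// __tgt_target_teams and the offload function, the offload function forks the
// teams outlined function, and the teams outlined function runs the
// distribute loop, forking the 'parallel for simd' outlined function. Both
// outlined functions copy the lastprivate values back on the last iteration.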

// CHECK: define{{.*}} i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call i{{[0-9]+}} @__tgt_target_teams(
// CHECK: call void [[OFFLOAD_FUN:@.+]](
// CHECK: ret

// CHECK: define{{.+}} [[OFFLOAD_FUN]](
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(
// CHECK: ret
//
// CHECK: define internal void [[OMP_OUTLINED:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN:%.+]], [2 x [[S_FLOAT_TY]]]*{{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]*{{.+}} [[VAR_IN:%.+]], i{{[0-9]+}}{{.*}} [[S_VAR_IN:%.+]])
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[TMP_VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]],
// CHECK: store {{.+}} [[T_VAR_IN]], {{.+}} [[T_VAR_ADDR]],
// CHECK: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]],
// CHECK: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]],
// CHECK: store {{.+}} [[S_VAR_IN]], {{.+}} [[SVAR_ADDR]],

// prepare lastprivate targets
// CHECK-64-DAG: [[TVAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR]],
// CHECK-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to

// the distribute loop
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: call void {{.*}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} @[[PAR_OUTL:.+]] to
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[TVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR]],
// CHECK-DAG: [[VEC_TGT_REF:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_REF]],
// CHECK-DAG: [[S_ARR_BEGIN:%.+]] = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_TGT_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_TGT_BCAST]],
// CHECK-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
// CHECK: ret void

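// Outlined 'parallel for simd' function forked from the distribute loop
// above; it additionally receives the previous lower and upper bounds and
// performs the same lastprivate copy-out.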
// CHECK: define internal void [[OMP_OUTLINED:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN:%.+]], [2 x [[S_FLOAT_TY]]]*{{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]*{{.+}} [[VAR_IN:%.+]], i{{[0-9]+}}{{.*}} [[S_VAR_IN:%.+]])

// gbl and bound tid vars, prev lb and ub vars
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},

// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[TMP_VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]],
// CHECK: store {{.+}} [[T_VAR_IN]], {{.+}} [[T_VAR_ADDR]],
// CHECK: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]],
// CHECK: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]],
// CHECK: store {{.+}} [[S_VAR_IN]], {{.+}} [[SVAR_ADDR]],

// prepare lastprivate targets
// CHECK-64-DAG: [[TVAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR]],
// CHECK-64-DAG: [[SVAR_TGT:%.+]] = bitcast {{.+}} [[SVAR_ADDR]] to

// the 'parallel for simd' loop
// CHECK: call void @__kmpc_for_static_init_4(
// skip body: the code generation routine is the same as for 'distribute parallel for' lastprivate
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[TVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR]],
// CHECK-DAG: [[VEC_TGT_REF:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_REF]],
// CHECK-DAG: [[S_ARR_BEGIN:%.+]] = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_TGT_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_TGT_BCAST]],
// CHECK-64-DAG: store {{.+}}, {{.+}} [[SVAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[SVAR_ADDR]],
// CHECK: ret void

// template tmain
// CHECK: define{{.*}} i{{[0-9]+}} [[TMAIN_INT:@.+]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call i{{[0-9]+}} @__tgt_target_teams(
// CHECK: call void [[OFFLOAD_FUN_1:@.+]](
// CHECK: ret

// CHECK: define internal void [[OFFLOAD_FUN_1]](
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4,
// CHECK: ret

// CHECK: define internal void [[OMP_OUTLINED_1:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR1:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN1:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN1:%.+]], [2 x [[S_INT_TY]]]*{{.+}} [[S_ARR_IN1:%.+]], [[S_INT_TY]]*{{.+}} [[VAR_IN1:%.+]])
// skip alloca of global_tid and bound_tid
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: [[VEC_ADDR1:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR1:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR1:%.+]] = alloca [2 x [[S_INT_TY]]]*,
// CHECK: [[VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: [[TMP_VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST1:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN1]], {{.+}} [[VEC_ADDR1]],
// CHECK: store {{.+}} [[T_VAR_IN1]], {{.+}} [[T_VAR_ADDR1]],
// CHECK: store {{.+}} [[S_ARR_IN1]], {{.+}} [[S_ARR_ADDR1]],
// CHECK: store {{.+}} [[VAR_IN1]], {{.+}} [[VAR_ADDR1]],

// prepare lastprivate targets
// CHECK-64-DAG: [[T_VAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR1]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR1]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR1]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR1]],

// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: call void {{.*}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} @[[TPAR_OUTL:.+]] to
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST1]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[T_VAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR1]],
// CHECK-DAG: [[VEC_TGT_BCAST:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_BCAST]],
// CHECK-DAG: {{.+}} = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_ADDR_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_ADDR_BCAST]],
// CHECK: ret void

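// Outlined 'parallel for simd' function for the tmain<int> instantiation; it
// matches the one for main except that there is no 'svar' argument.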
// CHECK: define internal void [[TPAR_OUTL:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR1:%.+]], i{{[0-9]+}}* noalias %{{.+}}, {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN1:%.+]], i{{[0-9]+}}{{.+}} [[T_VAR_IN1:%.+]], [2 x [[S_INT_TY]]]*{{.+}} [[S_ARR_IN1:%.+]], [[S_INT_TY]]*{{.+}} [[VAR_IN1:%.+]])
// skip alloca of global_tid and bound_tid, and prev lb and ub vars
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},

// CHECK: [[VEC_ADDR1:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[T_VAR_ADDR1:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[S_ARR_ADDR1:%.+]] = alloca [2 x [[S_INT_TY]]]*,
// CHECK: [[VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: [[TMP_VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST1:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store {{.+}} [[VEC_IN1]], {{.+}} [[VEC_ADDR1]],
// CHECK: store {{.+}} [[T_VAR_IN1]], {{.+}} [[T_VAR_ADDR1]],
// CHECK: store {{.+}} [[S_ARR_IN1]], {{.+}} [[S_ARR_ADDR1]],
// CHECK: store {{.+}} [[VAR_IN1]], {{.+}} [[VAR_ADDR1]],

// prepare lastprivate targets
// CHECK-64-DAG: [[T_VAR_TGT:%.+]] = bitcast {{.+}} [[T_VAR_ADDR1]] to
// CHECK-DAG: [[VEC_TGT:%.+]] = load {{.+}}, {{.+}} [[VEC_ADDR1]],
// CHECK-DAG: [[S_ARR_TGT:%.+]] = load {{.+}}, {{.+}} [[S_ARR_ADDR1]],
// CHECK-DAG: [[VAR_TGT:%.+]] = load {{.+}}, {{.+}} [[TMP_VAR_ADDR1]],

// CHECK: call void @__kmpc_for_static_init_4(
// skip body: the code generation routine is the same as for 'distribute parallel for' lastprivate
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST1]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK-64-DAG: store {{.+}}, {{.+}} [[T_VAR_TGT]],
// CHECK-32-DAG: store {{.+}}, {{.+}} [[T_VAR_ADDR1]],
// CHECK-DAG: [[VEC_TGT_BCAST:%.+]] = bitcast {{.+}} [[VEC_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_TGT_BCAST]],
// CHECK-DAG: {{.+}} = getelementptr {{.+}} [[S_ARR_TGT]],
// CHECK: call void @llvm.memcpy.{{.+}}(
// CHECK-DAG: [[VAR_ADDR_BCAST:%.+]] = bitcast {{.+}} [[VAR_TGT]] to
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VAR_ADDR_BCAST]],
// CHECK: ret void

#endif