// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
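// This test checks LLVM IR generation for the 'linear' clause on '#pragma omp for':
// explicit linear steps, linear on struct members (including the ref() and uval()
// modifiers), capture inside lambdas and blocks, and interaction with the 'allocate'
// clause.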
#ifndef HEADER
#define HEADER

enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
};
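// The enum above mirrors the predefined OpenMP allocator handles so the test does not
// depend on omp.h; omp_low_lat_mem_alloc (5) is used by the allocate clause below and
// is expected to show up in the IR as 'inttoptr (i64 5 to i8*)'.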

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g = 1212;
volatile int &g1 = g;
float f;
char cnt;

struct SS {
  int a;
  int b : 4;
  int &c;
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel
#pragma omp for linear(a, b, c)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        ++this->a, --b, (this)->c /= 1;
#pragma omp parallel
#pragma omp for linear(a, b) linear(ref(c))
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for linear(a, b) linear(uval(c))
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
  }
};
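// SS exercises 'linear' on a plain int member, a bit-field, and a reference member from
// inside a constructor; the LAMBDA and BLOCKS variants additionally apply the ref() and
// uval() linear modifiers to the reference member.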

template <typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel
#pragma omp for linear(a)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        [&]() {
          ++this->a;
#pragma omp parallel
#pragma omp for linear(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#elif defined(BLOCKS)
      ^{
        ^{
          ++a;
#pragma omp parallel
#pragma omp for linear(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#else
      ++(this)->a;
#endif
  }
};
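// SST is the templated analogue of SS: 'linear' on a dependent member, again wrapped in
// nested lambdas/blocks under LAMBDA/BLOCKS.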

// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[F:@.+]] ={{.*}} global float 0.0
// CHECK-DAG: [[CNT:@.+]] ={{.*}} global i8 0
template <typename T>
T tmain() {
  S<T> test;
  SST<T> sst;
  T *pvar = &test.f;
  T &lvar = test.f;
#pragma omp parallel
#pragma omp for linear(pvar, lvar)
  for (int i = 0; i < 2; ++i) {
    ++pvar, ++lvar;
  }
  return T();
}
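// tmain is instantiated as tmain<int>() from main(); 'linear' on a pointer and a
// reference with the default step of 1 is verified by the TMAIN_MICROTASK checks below.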

int main() {
  static int sivar;
  SS ss(sivar);
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] ={{.*}} global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: alloca [[SS_TY]],
  // LAMBDA: alloca [[CAP_TY:%.+]],
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]]([[CAP_TY]]*
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for linear(g, g1:5)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
    // LAMBDA: store i8
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
    // LAMBDA: ret

    // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
    // LAMBDA: call{{.*}} void
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: store i8 %{{.+}}, i8* [[B_REF]],
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
    // LAMBDA: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
    // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
    // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_START_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: [[VAL:%.+]] = load i32, i32* [[G_START_ADDR]]
    // LAMBDA: [[CNT:%.+]] = load i32, i32*
    // LAMBDA: [[MUL:%.+]] = mul nsw i32 [[CNT]], 5
    // LAMBDA: [[ADD:%.+]] = add nsw i32 [[VAL]], [[MUL]]
    // LAMBDA: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[ADD:%.+]] = add nsw i32 [[VAL]], 5
    // LAMBDA: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* {{[^,]*}} [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g += 5;
    g1 += 5;
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* {{[^,]*}} [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] ={{.*}} global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for linear(g, g1:5)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_START_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: [[VAL:%.+]] = load i32, i32* [[G_START_ADDR]]
    // BLOCKS: [[CNT:%.+]] = load i32, i32*
    // BLOCKS: [[MUL:%.+]] = mul nsw i32 [[CNT]], 5
    // BLOCKS: [[ADD:%.+]] = add nsw i32 [[VAL]], [[MUL]]
    // BLOCKS: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // BLOCKS: [[VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]],
    // BLOCKS: [[ADD:%.+]] = add nsw i32 [[VAL]], 5
    // BLOCKS: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g += 5;
    g1 += 5;
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    g1 = 5;
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
// BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// BLOCKS: store i8
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// BLOCKS: ret

// BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
// BLOCKS: call{{.*}} void
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: store i8 %{{.+}}, i8* [[B_REF]],
// BLOCKS: br label
// BLOCKS: ret void

// BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// BLOCKS: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: br label
// BLOCKS: ret void
#else
  S<float> test;
  float *pvar = &test.f;
  long long lvar = 0;
#pragma omp parallel
#pragma omp for linear(pvar, lvar : 3) allocate(omp_low_lat_mem_alloc: lvar)
  for (int i = 0; i < 2; ++i) {
    pvar += 3, lvar += 3;
  }
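  // With a linear step of 3, each private copy starts at 'start + iteration * 3'; because
  // lvar is also listed in the allocate clause, its private storage is expected to come
  // from __kmpc_alloc/__kmpc_free with the omp_low_lat_mem_alloc handle (see the
  // MAIN_MICROTASK checks below).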
  return tmain<int>();
#endif
}

// CHECK: define{{.*}} i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* {{[^,]*}} [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 2, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float**, i64*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, float** nonnull align 8 dereferenceable(8) %{{.+}}, i64* nonnull align 8 dereferenceable(8) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_START:%.+]] = alloca float*,
// CHECK: [[LVAR_START:%.+]] = alloca i64,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_PRIV:%.+]] = alloca float*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// Check for default initialization.
// CHECK: [[PVAR_REF:%.+]] = load float**, float*** %
// CHECK: [[LVAR_REF:%.+]] = load i64*, i64** %
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_REF]],
// CHECK: store float* [[PVAR_VAL]], float** [[PVAR_START]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_REF]],
// CHECK: store i64 [[LVAR_VAL]], i64* [[LVAR_START]],
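// lvar appears in 'allocate(omp_low_lat_mem_alloc: lvar)', so its private copy is obtained
// via __kmpc_alloc with allocator handle 5 and released with __kmpc_free after the loop.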
// CHECK: [[LVAR_VOID_PTR:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID:%.+]], i64 8, i8* inttoptr (i64 5 to i8*))
// CHECK: [[LVAR_PRIV:%.+]] = bitcast i8* [[LVAR_VOID_PTR]] to i64*
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 3
// CHECK: [[IDX:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[PTR:%.+]] = getelementptr inbounds float, float* [[PVAR_VAL]], i64 [[IDX]]
// CHECK: store float* [[PTR]], float** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 3
// CHECK: [[CONV:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[VAL:%.+]] = add nsw i64 [[LVAR_VAL]], [[CONV]]
// CHECK: store i64 [[VAL]], i64* [[LVAR_PRIV]],
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_PRIV]]
// CHECK: [[PTR:%.+]] = getelementptr inbounds float, float* [[PVAR_VAL]], i64 3
// CHECK: store float* [[PTR]], float** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_PRIV]],
// CHECK: [[ADD:%.+]] = add nsw i64 [[LVAR_VAL]], 3
// CHECK: store i64 [[ADD]], i64* [[LVAR_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})
// CHECK: [[LVAR_VOID_PTR:%.+]] = bitcast i64* [[LVAR_PRIV]] to i8*
// CHECK: call void @__kmpc_free(i32 [[GTID]], i8* [[LVAR_VOID_PTR]], i8* inttoptr (i64 5 to i8*))
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* {{[^,]*}} [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 2, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32**, i32*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret

// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: store i8
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// CHECK: ret

// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: call void @__kmpc_barrier(
// CHECK: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// CHECK: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: br i1
// CHECK: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// CHECK: store i8 %{{.+}}, i8* [[B_REF]],
// CHECK: br label
// CHECK: ret void

// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32** nonnull align 8 dereferenceable(8) %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_START:%.+]] = alloca i32*,
// CHECK: [[LVAR_START:%.+]] = alloca i32,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_PRIV:%.+]] = alloca i32*,
// CHECK: [[LVAR_PRIV:%.+]] = alloca i32,
// CHECK: [[LVAR_PRIV_REF:%.+]] = alloca i32*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// Check for default initialization.
// CHECK: [[PVAR_REF:%.+]] = load i32**, i32*** %
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_REF]],
// CHECK: store i32* [[PVAR_VAL]], i32** [[PVAR_START]],
// CHECK: [[LVAR_REF:%.+]] = load i32*, i32** %
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_REF]],
// CHECK: store i32 [[LVAR_VAL]], i32* [[LVAR_START]],
// CHECK: store i32* [[LVAR_PRIV]], i32** [[LVAR_PRIV_REF]],

// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 1
// CHECK: [[IDX:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[PTR:%.+]] = getelementptr inbounds i32, i32* [[PVAR_VAL]], i64 [[IDX]]
// CHECK: store i32* [[PTR]], i32** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 1
// CHECK: [[VAL:%.+]] = add nsw i32 [[LVAR_VAL]], [[MUL]]
// CHECK: store i32 [[VAL]], i32* [[LVAR_PRIV]],
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_PRIV]]
// CHECK: [[PTR:%.+]] = getelementptr inbounds i32, i32* [[PVAR_VAL]], i32 1
// CHECK: store i32* [[PTR]], i32** [[PVAR_PRIV]],
// CHECK: [[LVAR_PRIV:%.+]] = load i32*, i32** [[LVAR_PRIV_REF]],
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_PRIV]],
// CHECK: [[ADD:%.+]] = add nsw i32 [[LVAR_VAL]], 1
// CHECK: store i32 [[ADD]], i32* [[LVAR_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif