// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

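// Codegen checks for the combined '#pragma omp parallel master taskloop'
// directive: each clause combination below is expected to lower to a
// __kmpc_fork_call whose outlined function guards a __kmpc_omp_task_alloc /
// __kmpc_taskloop pair with __kmpc_master.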
// CHECK-LABEL: @main
int main(int argc, char **argv) {
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[DEFLOC:@.+]])
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[DEFLOC]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* [[OMP_OUTLINED1:@.+]] to void (i32*, i32*, ...)*), i64 [[PRIORITY:%.+]])
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[DEFLOC]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* [[OMP_OUTLINED2:@.+]] to void (i32*, i32*, ...)*), i64 [[GRAINSIZE:%.+]])
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[DEFLOC]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i8***, i64, i64)* [[OMP_OUTLINED3:@.+]] to void (i32*, i32*, ...)*), i32* [[ARGC:%.+]], i8*** [[ARGV:%.+]], i64 [[COND:%.+]], i64 [[NUM_TASKS:%.+]])
// CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK: call void [[OMP_OUTLINED3]](i32* %{{.+}}, i32* %{{.+}}, i32* [[ARGC]], i8*** [[ARGV]], i64 [[COND]], i64 [[NUM_TASKS]])
// CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[DEFLOC]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OMP_OUTLINED4:@.+]] to void (i32*, i32*, ...)*))

// CHECK: define internal void [[OMP_OUTLINED1]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i64 %{{.+}})
// CHECK: [[PRIO_ADDR:%.+]] = bitcast i64* %{{.+}} to i32*
// CHECK:       [[RES:%.+]] = call {{.*}}i32 @__kmpc_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID:%.+]])
// CHECK-NEXT:  [[IS_MASTER:%.+]] = icmp ne i32 [[RES]], 0
// CHECK-NEXT:  br i1 [[IS_MASTER]], label {{%?}}[[THEN:.+]], label {{%?}}[[EXIT:.+]]
// CHECK:       [[THEN]]
// CHECK: call void @__kmpc_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK: [[PRIO:%.+]] = load i32, i32* [[PRIO_ADDR]],
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i32 33, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK1:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[PRIO_ADDR:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 4
// CHECK: [[PRIO_ADDR_CAST:%.+]] = bitcast %{{.+}}* [[PRIO_ADDR]] to i32*
// CHECK: store i32 [[PRIO]], i32* [[PRIO_ADDR_CAST]],
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 9, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 1, i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 0, i64 0, i8* null)
// CHECK: call void @__kmpc_end_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK-NEXT:  call {{.*}}void @__kmpc_end_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK-NEXT:  br label {{%?}}[[EXIT]]
// CHECK:       [[EXIT]]


// CHECK: define internal i32 [[TASK1]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
// CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
// CHECK: br label
// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],
// CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
// CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
// CHECK: load i32, i32* %
// CHECK: store i32 %
// CHECK: load i32, i32* %
// CHECK: add nsw i32 %{{.+}}, 1
// CHECK: store i32 %{{.+}}, i32* %
// CHECK: br label %
// CHECK: ret i32 0

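// The 'priority(argc)' clause is expected to be forwarded into the outlined
// parallel region as shown above: the value is stored into the task data's
// priority field and the task is allocated with flags 33 (tied | priority).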
#pragma omp parallel master taskloop priority(argc)
  for (int i = 0; i < 10; ++i)
    ;
// CHECK: define internal void [[OMP_OUTLINED2]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i64 %{{.+}})
// CHECK:       [[RES:%.+]] = call {{.*}}i32 @__kmpc_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID:%.+]])
// CHECK-NEXT:  [[IS_MASTER:%.+]] = icmp ne i32 [[RES]], 0
// CHECK-NEXT:  br i1 [[IS_MASTER]], label {{%?}}[[THEN:.+]], label {{%?}}[[EXIT:.+]]
// CHECK:       [[THEN]]
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i32 1, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK2:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 9, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[GRAINSIZE:%.+]] = zext i32 %{{.+}} to i64
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 1, i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 1, i64 [[GRAINSIZE]], i8* null)
// CHECK-NEXT:  call {{.*}}void @__kmpc_end_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK-NEXT:  br label {{%?}}[[EXIT]]
// CHECK:       [[EXIT]]


// CHECK: define internal i32 [[TASK2]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
// CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
// CHECK: br label
// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],
// CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
// CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
// CHECK: load i32, i32* %
// CHECK: store i32 %
// CHECK: load i32, i32* %
// CHECK: add nsw i32 %{{.+}}, 1
// CHECK: store i32 %{{.+}}, i32* %
// CHECK: br label %
// CHECK: ret i32 0

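// With 'nogroup' no __kmpc_taskgroup/__kmpc_end_taskgroup pair is emitted,
// and 'grainsize(argc)' is zero-extended and passed to __kmpc_taskloop with
// sched value 1, as checked above.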
#pragma omp parallel master taskloop nogroup grainsize(argc)
  for (int i = 0; i < 10; ++i)
    ;
// CHECK: define internal void [[OMP_OUTLINED3]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, i8*** nonnull align 8 dereferenceable(8) %{{.+}}, i64 %{{.+}}, i64 %{{.+}})
// CHECK:       [[RES:%.+]] = call {{.*}}i32 @__kmpc_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID:%.+]])
// CHECK-NEXT:  [[IS_MASTER:%.+]] = icmp ne i32 [[RES]], 0
// CHECK-NEXT:  br i1 [[IS_MASTER]], label {{%?}}[[THEN:.+]], label {{%?}}[[EXIT:.+]]
// CHECK:       [[THEN]]
// CHECK: call void @__kmpc_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i32 1, i64 80, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK3:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[COND_VAL:%.+]] = load i8, i8* %{{.+}},
// CHECK: [[COND_BOOL:%.+]] = trunc i8 [[COND_VAL]] to i1
// CHECK: [[IF_INT:%.+]] = sext i1 [[COND_BOOL]] to i32
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 %{{.+}}, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[NUM_TASKS:%.+]] = zext i32 %{{.+}} to i64
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 [[IF_INT]], i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 2, i64 [[NUM_TASKS]], i8* null)
// CHECK: call void @__kmpc_end_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK-NEXT:  call {{.*}}void @__kmpc_end_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK-NEXT:  br label {{%?}}[[EXIT]]
// CHECK:       [[EXIT]]

// CHECK: define internal i32 [[TASK3]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[CNT:%.+]],
// CHECK: br label
// CHECK: ret i32 0

  int i;
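// 'if(argc)' is evaluated and passed as the 'if' argument of __kmpc_taskloop,
// 'collapse(2)' keeps the collapsed iteration variable as i64, and
// 'num_tasks(argc)' is zero-extended and passed with sched value 2,
// as checked above.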
#pragma omp parallel master taskloop if(argc) shared(argc, argv) collapse(2) num_tasks(argc)
  for (i = 0; i < argc; ++i)
  for (int j = argc; j < argv[argc][argc]; ++j)
    ;
// CHECK:       define internal void [[OMP_OUTLINED4]](i32* {{.+}}, i32* {{.+}})
// CHECK:       [[RES:%.+]] = call {{.*}}i32 @__kmpc_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID:%.+]])
// CHECK-NEXT:  [[IS_MASTER:%.+]] = icmp ne i32 [[RES]], 0
// CHECK-NEXT:  br i1 [[IS_MASTER]], label {{%?}}[[THEN:.+]], label {{%?}}[[EXIT:.+]]
// CHECK:       [[THEN]]
// CHECK: call void @__kmpc_taskgroup(
// CHECK: call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i32 1, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %{{.+}}*)* [[TASK_CANCEL:@.+]] to i32 (i32, i8*)*))
// CHECK: call void @__kmpc_taskloop(
// CHECK: call void @__kmpc_end_taskgroup(
// CHECK-NEXT:  call {{.*}}void @__kmpc_end_master(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK-NEXT:  br label {{%?}}[[EXIT]]
// CHECK:       [[EXIT]]
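// 'cancel taskgroup' and 'cancellation point taskgroup' are allowed inside
// the taskloop body; the task entry emitted for this directive is captured
// as TASK_CANCEL above.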
#pragma omp parallel master taskloop
  for (int i = 0; i < 10; ++i) {
#pragma omp cancel taskgroup
#pragma omp cancellation point taskgroup
  }
}

// CHECK-LABEL: @_ZN1SC2Ei
struct S {
  int a;
  S(int c) {
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[DEFLOC]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S*, i32*, i64)* [[OMP_OUTLINED4:@.+]] to void (i32*, i32*, ...)*), %struct.S* %{{.+}}, i32* %{{.+}}, i64 %{{.+}})

// CHECK: define internal void [[OMP_OUTLINED4]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, %struct.S* %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, i64 %{{.+}})
// CHECK: [[CONV:%.+]] = bitcast i64* %{{.+}} to i8*
// CHECK: [[CONDI8:%.+]] = load i8, i8* [[CONV]],
// CHECK: [[COND:%.+]] = trunc i8 [[CONDI8]] to i1
// CHECK: [[IS_FINAL:%.+]] = select i1 [[COND:%.+]], i32 2, i32 0
// CHECK: [[FLAGS:%.+]] = or i32 [[IS_FINAL]], 1
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID:%.+]], i32 [[FLAGS]], i64 80, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK4:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 %{{.+}}, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 1, i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 2, i64 4, i8* null)
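// 'final(c)' conditionally ORs the final bit (0x2) into the tied flag (0x1)
// of the task allocation flags, and 'num_tasks(4)' is passed as the constant
// i64 4 with sched value 2, as checked above.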
#pragma omp parallel master taskloop shared(c) num_tasks(4) final(c)
    for (a = 0; a < c; ++a)
      ;
  }
} s(1);

// CHECK: define internal i32 [[TASK4]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
// CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
// CHECK: br label
// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],
// CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
// CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
// CHECK: load i32, i32* %
// CHECK: store i32 %
// CHECK: load i32, i32* %
// CHECK: add nsw i32 %{{.+}}, 1
// CHECK: store i32 %{{.+}}, i32* %
// CHECK: br label %
// CHECK: ret i32 0

#endif