1// RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s
2
3// CHECK-LABEL: define void @test_stand_alone_directives()
// Stand-alone directives: omp.barrier, omp.taskwait and omp.taskyield each
// lower to a single __kmpc_* runtime call that takes the current thread id
// obtained from __kmpc_global_thread_num.
4llvm.func @test_stand_alone_directives() {
5  // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
6  // CHECK-NEXT:  call void @__kmpc_barrier(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD]])
7  omp.barrier
8
9  // CHECK: [[OMP_THREAD1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
10  // CHECK-NEXT:  [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskwait(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD1]])
11  omp.taskwait
12
13  // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
14  // CHECK-NEXT:  [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskyield(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD2]], i32 0)
15  omp.taskyield
16
17  // CHECK-NEXT:    ret void
18  llvm.return
19}
20
21// CHECK-LABEL: define void @test_flush_construct(i32 %0)
// omp.flush lowers to a call to __kmpc_flush whether or not an operand list
// is present; the expectations below are identical for all three forms. The
// trailing alloca/load pair verifies that a flush between them does not
// disturb ordinary memory-access lowering.
22llvm.func @test_flush_construct(%arg0: i32) {
23  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
24  omp.flush
25
26  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
27  omp.flush (%arg0 : i32)
28
29  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
30  omp.flush (%arg0, %arg0 : i32, i32)
31
32  %0 = llvm.mlir.constant(1 : i64) : i64
33  //  CHECK: alloca {{.*}} align 4
34  %1 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr<i32>
35  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
36  omp.flush
37  //  CHECK: load i32, i32*
38  %2 = llvm.load %1 : !llvm.ptr<i32>
39
40  // CHECK-NEXT:    ret void
41  llvm.return
42}
43
44// CHECK-LABEL: define void @test_omp_parallel_1()
// A parallel region is outlined into a separate function that is invoked via
// __kmpc_fork_call; the region body (the barrier) is verified inside the
// outlined function, see the expectations following this function.
45llvm.func @test_omp_parallel_1() -> () {
46  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_1:.*]] to {{.*}}
47  omp.parallel {
48    omp.barrier
49    omp.terminator
50  }
51
52  llvm.return
53}
54
55// CHECK: define internal void @[[OMP_OUTLINED_FN_1]]
56  // CHECK: call void @__kmpc_barrier
57
58llvm.func @body(i64)
59
60// CHECK-LABEL: define void @test_omp_parallel_2()
// Multi-block parallel region: values defined in the entry block (^bb0) are
// used in the successor block (^bb1), so outlining must keep them visible
// across the branch inside the outlined function.
61llvm.func @test_omp_parallel_2() -> () {
62  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_2:.*]] to {{.*}}
63  omp.parallel {
64    ^bb0:
65      %0 = llvm.mlir.constant(1 : index) : i64
66      %1 = llvm.mlir.constant(42 : index) : i64
67      llvm.call @body(%0) : (i64) -> ()
68      llvm.call @body(%1) : (i64) -> ()
69      llvm.br ^bb1
70
71    ^bb1:
72      %2 = llvm.add %0, %1 : i64
73      llvm.call @body(%2) : (i64) -> ()
74      omp.terminator
75  }
76  llvm.return
77}
78
79// CHECK: define internal void @[[OMP_OUTLINED_FN_2]]
80  // CHECK-LABEL: omp.par.region:
81  // CHECK: br label %omp.par.region1
82  // CHECK-LABEL: omp.par.region1:
83  // CHECK: call void @body(i64 1)
84  // CHECK: call void @body(i64 42)
85  // CHECK: br label %omp.par.region2
86  // CHECK-LABEL: omp.par.region2:
87  // CHECK: call void @body(i64 43)
88  // CHECK: br label %omp.par.pre_finalize
89
90// CHECK: define void @test_omp_parallel_num_threads_1(i32 %[[NUM_THREADS_VAR_1:.*]])
// num_threads clause with an SSA-value operand: the value is passed to
// __kmpc_push_num_threads before the fork call is emitted.
91llvm.func @test_omp_parallel_num_threads_1(%arg0: i32) -> () {
92  // CHECK: %[[GTN_NUM_THREADS_VAR_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_1:.*]])
93  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_1]], i32 %[[GTN_NUM_THREADS_VAR_1]], i32 %[[NUM_THREADS_VAR_1]])
94  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_1:.*]] to {{.*}}
95  omp.parallel num_threads(%arg0: i32) {
96    omp.barrier
97    omp.terminator
98  }
99
100  llvm.return
101}
102
103// CHECK: define internal void @[[OMP_OUTLINED_FN_NUM_THREADS_1]]
104  // CHECK: call void @__kmpc_barrier
105
106// CHECK: define void @test_omp_parallel_num_threads_2()
// num_threads clause with a constant operand: the constant (4) is folded
// directly into the __kmpc_push_num_threads call.
107llvm.func @test_omp_parallel_num_threads_2() -> () {
108  %0 = llvm.mlir.constant(4 : index) : i32
109  // CHECK: %[[GTN_NUM_THREADS_VAR_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_2:.*]])
110  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_2]], i32 %[[GTN_NUM_THREADS_VAR_2]], i32 4)
111  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_2:.*]] to {{.*}}
112  omp.parallel num_threads(%0: i32) {
113    omp.barrier
114    omp.terminator
115  }
116
117  llvm.return
118}
119
120// CHECK: define internal void @[[OMP_OUTLINED_FN_NUM_THREADS_2]]
121  // CHECK: call void @__kmpc_barrier
122
123// CHECK: define void @test_omp_parallel_num_threads_3()
// Two sibling parallel regions with different num_threads values (4 and 8):
// each gets its own push_num_threads/fork pair and its own outlined function.
124llvm.func @test_omp_parallel_num_threads_3() -> () {
125  %0 = llvm.mlir.constant(4 : index) : i32
126  // CHECK: %[[GTN_NUM_THREADS_VAR_3_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_1:.*]])
127  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_1]], i32 %[[GTN_NUM_THREADS_VAR_3_1]], i32 4)
128  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_1:.*]] to {{.*}}
129  omp.parallel num_threads(%0: i32) {
130    omp.barrier
131    omp.terminator
132  }
133  %1 = llvm.mlir.constant(8 : index) : i32
134  // CHECK: %[[GTN_NUM_THREADS_VAR_3_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_2:.*]])
135  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_2]], i32 %[[GTN_NUM_THREADS_VAR_3_2]], i32 8)
136  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_2:.*]] to {{.*}}
137  omp.parallel num_threads(%1: i32) {
138    omp.barrier
139    omp.terminator
140  }
141
142  llvm.return
143}
144
145// CHECK: define internal void @[[OMP_OUTLINED_FN_NUM_THREADS_3_2]]
146  // CHECK: call void @__kmpc_barrier
147
148// CHECK: define internal void @[[OMP_OUTLINED_FN_NUM_THREADS_3_1]]
149  // CHECK: call void @__kmpc_barrier
150
151// CHECK: define void @test_omp_parallel_if_1(i32 %[[IF_VAR_1:.*]])
// Parallel region with an `if` clause: when the condition is false, the
// outlined function is invoked directly between __kmpc_serialized_parallel
// and __kmpc_end_serialized_parallel instead of through a fork.
152llvm.func @test_omp_parallel_if_1(%arg0: i32) -> () {
153
154// Check that the allocas are emitted by the OpenMPIRBuilder at the top of the
155// function, before the condition. Allocas are only emitted by the builder when
156// the `if` clause is present. We match specific SSA value names since LLVM
157// actually produces those names.
158// CHECK: %tid.addr{{.*}} = alloca i32
159// CHECK: %zero.addr{{.*}} = alloca i32
160
161// CHECK: %[[IF_COND_VAR_1:.*]] = icmp slt i32 %[[IF_VAR_1]], 0
162  %0 = llvm.mlir.constant(0 : index) : i32
163  %1 = llvm.icmp "slt" %arg0, %0 : i32
164
165// CHECK: %[[GTN_IF_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[SI_VAR_IF_1:.*]])
166// CHECK: br i1 %[[IF_COND_VAR_1]], label %[[IF_COND_TRUE_BLOCK_1:.*]], label %[[IF_COND_FALSE_BLOCK_1:.*]]
167// CHECK: [[IF_COND_TRUE_BLOCK_1]]:
168// CHECK: br label %[[OUTLINED_CALL_IF_BLOCK_1:.*]]
169// CHECK: [[OUTLINED_CALL_IF_BLOCK_1]]:
170// CHECK: call void {{.*}} @__kmpc_fork_call(%struct.ident_t* @[[SI_VAR_IF_1]], {{.*}} @[[OMP_OUTLINED_FN_IF_1:.*]] to void
171// CHECK: br label %[[OUTLINED_EXIT_IF_1:.*]]
172// CHECK: [[OUTLINED_EXIT_IF_1]]:
173// CHECK: br label %[[OUTLINED_EXIT_IF_2:.*]]
174// CHECK: [[OUTLINED_EXIT_IF_2]]:
175// CHECK: br label %[[RETURN_BLOCK_IF_1:.*]]
176// CHECK: [[IF_COND_FALSE_BLOCK_1]]:
177// CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
178// CHECK: call void @[[OMP_OUTLINED_FN_IF_1]]
179// CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
180// CHECK: br label %[[RETURN_BLOCK_IF_1]]
181  omp.parallel if(%1 : i1) {
182    omp.barrier
183    omp.terminator
184  }
185
186// CHECK: [[RETURN_BLOCK_IF_1]]:
187// CHECK: ret void
188  llvm.return
189}
190
191// CHECK: define internal void @[[OMP_OUTLINED_FN_IF_1]]
192  // CHECK: call void @__kmpc_barrier
193
194// -----
195
196// CHECK-LABEL: @test_nested_alloca_ip
// Three levels of nested parallel regions, each with an `if` clause: verifies
// that at every outlining level the builder-inserted allocas land at the top
// of the (outlined) function, before that level's comparison.
197llvm.func @test_nested_alloca_ip(%arg0: i32) -> () {
198
199  // Check that the allocas are emitted by the OpenMPIRBuilder at the top of
200  // the function, before the condition. Allocas are only emitted by the
201  // builder when the `if` clause is present. We match specific SSA value names
202  // since LLVM actually produces those names and ensure they come before the
203  // "icmp" that is the first operation we emit.
204  // CHECK: %tid.addr{{.*}} = alloca i32
205  // CHECK: %zero.addr{{.*}} = alloca i32
206  // CHECK: icmp slt i32 %{{.*}}, 0
207  %0 = llvm.mlir.constant(0 : index) : i32
208  %1 = llvm.icmp "slt" %arg0, %0 : i32
209
210  omp.parallel if(%1 : i1) {
211    // The "parallel" operation will be outlined, check that the function is
212    // produced. Inside that function, further allocas should be placed before
213    // another "icmp".
214    // CHECK: define
215    // CHECK: %tid.addr{{.*}} = alloca i32
216    // CHECK: %zero.addr{{.*}} = alloca i32
217    // CHECK: icmp slt i32 %{{.*}}, 1
218    %2 = llvm.mlir.constant(1 : index) : i32
219    %3 = llvm.icmp "slt" %arg0, %2 : i32
220
221    omp.parallel if(%3 : i1) {
222      // One more nesting level.
223      // CHECK: define
224      // CHECK: %tid.addr{{.*}} = alloca i32
225      // CHECK: %zero.addr{{.*}} = alloca i32
226      // CHECK: icmp slt i32 %{{.*}}, 2
227
228      %4 = llvm.mlir.constant(2 : index) : i32
229      %5 = llvm.icmp "slt" %arg0, %4 : i32
230
231      omp.parallel if(%5 : i1) {
232        omp.barrier
233        omp.terminator
234      }
235
236      omp.barrier
237      omp.terminator
238    }
239    omp.barrier
240    omp.terminator
241  }
242
243  llvm.return
244}
245
246// -----
247
248// CHECK-LABEL: define void @test_omp_parallel_3()
// proc_bind clause: master, close and spread are encoded as the integer
// arguments 2, 3 and 4 of __kmpc_push_proc_bind, emitted before each fork.
249llvm.func @test_omp_parallel_3() -> () {
250  // CHECK: [[OMP_THREAD_3_1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
251  // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_1]], i32 2)
252  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_1:.*]] to {{.*}}
253  omp.parallel proc_bind(master) {
254    omp.barrier
255    omp.terminator
256  }
257  // CHECK: [[OMP_THREAD_3_2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
258  // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_2]], i32 3)
259  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_2:.*]] to {{.*}}
260  omp.parallel proc_bind(close) {
261    omp.barrier
262    omp.terminator
263  }
264  // CHECK: [[OMP_THREAD_3_3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
265  // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_3]], i32 4)
266  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_3:.*]] to {{.*}}
267  omp.parallel proc_bind(spread) {
268    omp.barrier
269    omp.terminator
270  }
271
272  llvm.return
273}
274
275// CHECK: define internal void @[[OMP_OUTLINED_FN_3_3]]
276// CHECK: define internal void @[[OMP_OUTLINED_FN_3_2]]
277// CHECK: define internal void @[[OMP_OUTLINED_FN_3_1]]
278
279// CHECK-LABEL: define void @test_omp_parallel_4()
// Nested parallel regions (two levels): each level is outlined into its own
// function, the inner one forked from inside the outer outlined function.
280llvm.func @test_omp_parallel_4() -> () {
281// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1:.*]] to
282// CHECK: define internal void @[[OMP_OUTLINED_FN_4_1]]
283// CHECK: call void @__kmpc_barrier
284// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1_1:.*]] to
285// CHECK: call void @__kmpc_barrier
286  omp.parallel {
287    omp.barrier
288
289// CHECK: define internal void @[[OMP_OUTLINED_FN_4_1_1]]
290// CHECK: call void @__kmpc_barrier
291    omp.parallel {
292      omp.barrier
293      omp.terminator
294    }
295
296    omp.barrier
297    omp.terminator
298  }
299  llvm.return
300}
301
// Nested parallel regions (three levels): same outlining scheme as above,
// one outlined function per nesting level.
302llvm.func @test_omp_parallel_5() -> () {
303// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1:.*]] to
304// CHECK: define internal void @[[OMP_OUTLINED_FN_5_1]]
305// CHECK: call void @__kmpc_barrier
306// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1:.*]] to
307// CHECK: call void @__kmpc_barrier
308  omp.parallel {
309    omp.barrier
310
311// CHECK: define internal void @[[OMP_OUTLINED_FN_5_1_1]]
312    omp.parallel {
313// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1_1:.*]] to
314// CHECK: define internal void @[[OMP_OUTLINED_FN_5_1_1_1]]
315// CHECK: call void @__kmpc_barrier
316      omp.parallel {
317        omp.barrier
318        omp.terminator
319      }
320      omp.terminator
321    }
322
323    omp.barrier
324    omp.terminator
325  }
326  llvm.return
327}
328
329// CHECK-LABEL: define void @test_omp_master()
// omp.master inside omp.parallel: the master region is guarded by a
// __kmpc_master / __kmpc_end_master pair inside the outlined function. The
// second, doubly-nested variant only needs to translate without error.
330llvm.func @test_omp_master() -> () {
331// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @{{.*}} to
332// CHECK: omp.par.region1:
333  omp.parallel {
334    omp.master {
335// CHECK: [[OMP_THREAD_3_4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
336// CHECK: {{[0-9]+}} = call i32 @__kmpc_master(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
337// CHECK: omp.master.region
338// CHECK: call void @__kmpc_end_master(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
339// CHECK: br label %omp_region.end
340      omp.terminator
341    }
342    omp.terminator
343  }
344  omp.parallel {
345    omp.parallel {
346      omp.master {
347        omp.terminator
348      }
349      omp.terminator
350    }
351    omp.terminator
352  }
353  llvm.return
354}
355
356// -----
357
358// CHECK: %struct.ident_t = type
359// CHECK: @[[$parallel_loc:.*]] = private unnamed_addr constant {{.*}} c";LLVMDialectModule;wsloop_simple;{{[0-9]+}};{{[0-9]+}};;\00"
360// CHECK: @[[$parallel_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$parallel_loc]], {{.*}}
361
362// CHECK: @[[$wsloop_loc:.*]] = private unnamed_addr constant {{.*}} c";LLVMDialectModule;wsloop_simple;{{[0-9]+}};{{[0-9]+}};;\00"
363// CHECK: @[[$wsloop_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$wsloop_loc]], {{.*}}
364
365// CHECK-LABEL: @wsloop_simple
// Statically-scheduled workshare loop (generic-form omp.wsloop) inside a
// parallel region: verifies the static_init/static_fini runtime calls and,
// via the globals matched above, the debug-location ident structs.
366llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
367  %0 = llvm.mlir.constant(42 : index) : i64
368  %1 = llvm.mlir.constant(10 : index) : i64
369  %2 = llvm.mlir.constant(1 : index) : i64
370  omp.parallel {
371    "omp.wsloop"(%1, %0, %2) ( {
372    ^bb0(%arg1: i64):
373      // The form of the emitted IR is controlled by OpenMPIRBuilder and
374      // tested there. Just check that the right functions are called.
375      // CHECK: call i32 @__kmpc_global_thread_num
376      // CHECK: call void @__kmpc_for_static_init_{{.*}}(%struct.ident_t* @[[$wsloop_loc_struct]],
377      %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
378      %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
379      llvm.store %3, %4 : !llvm.ptr<f32>
380      omp.yield
381      // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* @[[$wsloop_loc_struct]],
382    }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0, 0]> : vector<10xi32>} : (i64, i64, i64) -> ()
383    omp.terminator
384  }
385  llvm.return
386}
387
388// CHECK-LABEL: @wsloop_inclusive_1
// Without the `inclusive` attribute the loop bound (10..42, step 1) is
// exclusive, so the stored upperbound value is 31.
389llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr<f32>) {
390  %0 = llvm.mlir.constant(42 : index) : i64
391  %1 = llvm.mlir.constant(10 : index) : i64
392  %2 = llvm.mlir.constant(1 : index) : i64
393  // CHECK: store i64 31, i64* %{{.*}}upperbound
394  "omp.wsloop"(%1, %0, %2) ( {
395  ^bb0(%arg1: i64):
396    %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
397    %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
398    llvm.store %3, %4 : !llvm.ptr<f32>
399    omp.yield
400  }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0, 0]> : vector<10xi32>} : (i64, i64, i64) -> ()
401  llvm.return
402}
403
404// CHECK-LABEL: @wsloop_inclusive_2
// Same loop as above but with the `inclusive` attribute: the stored
// upperbound value becomes 32, one larger than the exclusive case.
405llvm.func @wsloop_inclusive_2(%arg0: !llvm.ptr<f32>) {
406  %0 = llvm.mlir.constant(42 : index) : i64
407  %1 = llvm.mlir.constant(10 : index) : i64
408  %2 = llvm.mlir.constant(1 : index) : i64
409  // CHECK: store i64 32, i64* %{{.*}}upperbound
410  "omp.wsloop"(%1, %0, %2) ( {
411  ^bb0(%arg1: i64):
412    %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
413    %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
414    llvm.store %3, %4 : !llvm.ptr<f32>
415    omp.yield
416  }) {inclusive, operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0, 0]> : vector<10xi32>} : (i64, i64, i64) -> ()
417  llvm.return
418}
419
420llvm.func @body(i64)
421
// schedule(dynamic) lowers to the dispatch-based runtime loop:
// __kmpc_dispatch_init_8u once, then __kmpc_dispatch_next_8u per chunk, with
// the loop continuing while dispatch_next returns non-zero.
// Fixed an inert FileCheck directive below: it previously read "// CHECK  br"
// (no colon), so the branch on the dispatch result was never actually checked.
422llvm.func @test_omp_wsloop_dynamic(%lb : i64, %ub : i64, %step : i64) -> () {
423 omp.wsloop (%iv) : i64 = (%lb) to (%ub) step (%step) schedule(dynamic) {
424  // CHECK: call void @__kmpc_dispatch_init_8u
425  // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
426  // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
427  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
428   llvm.call @body(%iv) : (i64) -> ()
429   omp.yield
430 }
431 llvm.return
432}
433
// schedule(auto) also lowers to the dispatch-based runtime loop, identical
// expectations to the dynamic case.
// Fixed an inert FileCheck directive below: it previously read "// CHECK  br"
// (no colon), so the branch on the dispatch result was never actually checked.
434llvm.func @test_omp_wsloop_auto(%lb : i64, %ub : i64, %step : i64) -> () {
435 omp.wsloop (%iv) : i64 = (%lb) to (%ub) step (%step) schedule(auto) {
436  // CHECK: call void @__kmpc_dispatch_init_8u
437  // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
438  // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
439  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
440   llvm.call @body(%iv) : (i64) -> ()
441   omp.yield
442 }
443 llvm.return
444}
445
// schedule(runtime) also lowers to the dispatch-based runtime loop, identical
// expectations to the dynamic case.
// Fixed an inert FileCheck directive below: it previously read "// CHECK  br"
// (no colon), so the branch on the dispatch result was never actually checked.
446llvm.func @test_omp_wsloop_runtime(%lb : i64, %ub : i64, %step : i64) -> () {
447 omp.wsloop (%iv) : i64 = (%lb) to (%ub) step (%step) schedule(runtime) {
448  // CHECK: call void @__kmpc_dispatch_init_8u
449  // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
450  // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
451  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
452   llvm.call @body(%iv) : (i64) -> ()
453   omp.yield
454 }
455 llvm.return
456}
457
// schedule(guided) also lowers to the dispatch-based runtime loop, identical
// expectations to the dynamic case.
// Fixed an inert FileCheck directive below: it previously read "// CHECK  br"
// (no colon), so the branch on the dispatch result was never actually checked.
458llvm.func @test_omp_wsloop_guided(%lb : i64, %ub : i64, %step : i64) -> () {
459 omp.wsloop (%iv) : i64 = (%lb) to (%ub) step (%step) schedule(guided) {
460  // CHECK: call void @__kmpc_dispatch_init_8u
461  // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
462  // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
463  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
464   llvm.call @body(%iv) : (i64) -> ()
465   omp.yield
466 }
467 llvm.return
468}
469