// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template<typename tx>
tx ftemplate(int n) {
  tx b[10];

  #pragma omp target
  {
    tx d = n;
    #pragma omp parallel for
    for(int i=0; i<10; i++) {
      b[i] += d;
    }
    b[3] += 1;
  }

  return b[3];
}

int bar(int n){
  int a = 0;

  a += ftemplate<int>(n);

  return a;
}
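// bar() instantiates ftemplate<int>; that instantiation is what forces
// emission of the offload kernel for the target region above.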

#endif
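// The generated assertions below are grouped by prefix. Only the plain CHECK
// prefix is enabled by the RUN line above; the CHECK1 and CHECK2 groups were
// generated against the older NVPTX device runtime and differ only in how the
// captured variable 'd' is globalized: CHECK1 places it in team static memory
// (__kmpc_get_team_static_memory), CHECK2 on the data-sharing stack
// (__kmpc_data_sharing_push_stack). The CHECK group uses the current runtime
// entry points (__kmpc_target_init / __kmpc_alloc_shared). 'd' must be
// globalized because it is captured by reference in the inner parallel region.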
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[WORK_FN:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    [[EXEC_STATUS:%.*]] = alloca i8, align 1
// CHECK1-NEXT:    store i8* null, i8** [[WORK_FN]], align 8
// CHECK1-NEXT:    store i8 0, i8* [[EXEC_STATUS]], align 1
// CHECK1-NEXT:    br label [[DOTAWAIT_WORK:%.*]]
// CHECK1:       .await.work:
// CHECK1-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK1-NEXT:    [[TMP0:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORK_FN]])
// CHECK1-NEXT:    [[TMP1:%.*]] = zext i1 [[TMP0]] to i8
// CHECK1-NEXT:    store i8 [[TMP1]], i8* [[EXEC_STATUS]], align 1
// CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK1-NEXT:    [[SHOULD_TERMINATE:%.*]] = icmp eq i8* [[TMP2]], null
// CHECK1-NEXT:    br i1 [[SHOULD_TERMINATE]], label [[DOTEXIT:%.*]], label [[DOTSELECT_WORKERS:%.*]]
// CHECK1:       .select.workers:
// CHECK1-NEXT:    [[TMP3:%.*]] = load i8, i8* [[EXEC_STATUS]], align 1
// CHECK1-NEXT:    [[IS_ACTIVE:%.*]] = icmp ne i8 [[TMP3]], 0
// CHECK1-NEXT:    br i1 [[IS_ACTIVE]], label [[DOTEXECUTE_PARALLEL:%.*]], label [[DOTBARRIER_PARALLEL:%.*]]
// CHECK1:       .execute.parallel:
// CHECK1-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT:    [[TMP5:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK1-NEXT:    [[WORK_MATCH:%.*]] = icmp eq i8* [[TMP5]], bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*)
// CHECK1-NEXT:    br i1 [[WORK_MATCH]], label [[DOTEXECUTE_FN:%.*]], label [[DOTCHECK_NEXT:%.*]]
// CHECK1:       .execute.fn:
// CHECK1-NEXT:    call void @__omp_outlined___wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT:    br label [[DOTTERMINATE_PARALLEL:%.*]]
// CHECK1:       .check.next:
// CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP2]] to void (i16, i32)*
// CHECK1-NEXT:    call void [[TMP6]](i16 0, i32 [[TMP4]])
// CHECK1-NEXT:    br label [[DOTTERMINATE_PARALLEL]]
// CHECK1:       .terminate.parallel:
// CHECK1-NEXT:    call void @__kmpc_kernel_end_parallel()
// CHECK1-NEXT:    br label [[DOTBARRIER_PARALLEL]]
// CHECK1:       .barrier.parallel:
// CHECK1-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK1-NEXT:    br label [[DOTAWAIT_WORK]]
// CHECK1:       .exit:
// CHECK1-NEXT:    ret void
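// The worker function above implements the generic-mode state machine: workers
// park at __kmpc_barrier_simple_spmd, receive a work function pointer via
// __kmpc_kernel_parallel, run it (directly when it matches the known
// __omp_outlined___wrapper, otherwise through an indirect call), and loop
// until a null work function signals termination.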
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14
// CHECK1-SAME: (i64 [[N:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT:    [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK1-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK1-NEXT:    [[NVPTX_WARP_SIZE:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK1-NEXT:    [[THREAD_LIMIT:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS]], [[NVPTX_WARP_SIZE]]
// CHECK1-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[NVPTX_TID]], [[THREAD_LIMIT]]
// CHECK1-NEXT:    br i1 [[TMP1]], label [[DOTWORKER:%.*]], label [[DOTMASTERCHECK:%.*]]
// CHECK1:       .worker:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker() #[[ATTR3]]
// CHECK1-NEXT:    br label [[DOTEXIT:%.*]]
// CHECK1:       .mastercheck:
// CHECK1-NEXT:    [[NVPTX_TID1:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK1-NEXT:    [[NVPTX_NUM_THREADS2:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK1-NEXT:    [[NVPTX_WARP_SIZE3:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK1-NEXT:    [[TMP2:%.*]] = sub nuw i32 [[NVPTX_WARP_SIZE3]], 1
// CHECK1-NEXT:    [[TMP3:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS2]], 1
// CHECK1-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP2]], -1
// CHECK1-NEXT:    [[MASTER_TID:%.*]] = and i32 [[TMP3]], [[TMP4]]
// CHECK1-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID1]], [[MASTER_TID]]
// CHECK1-NEXT:    br i1 [[TMP5]], label [[DOTMASTER:%.*]], label [[DOTEXIT]]
// CHECK1:       .master:
// CHECK1-NEXT:    [[NVPTX_NUM_THREADS4:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK1-NEXT:    [[NVPTX_WARP_SIZE5:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK1-NEXT:    [[THREAD_LIMIT6:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS4]], [[NVPTX_WARP_SIZE5]]
// CHECK1-NEXT:    call void @__kmpc_kernel_init(i32 [[THREAD_LIMIT6]], i16 1)
// CHECK1-NEXT:    call void @__kmpc_data_sharing_init_stack()
// CHECK1-NEXT:    [[TMP6:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
// CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* @"_openmp_static_kernel$size", align 8
// CHECK1-NEXT:    call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i64 [[TMP7]], i16 [[TMP6]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
// CHECK1-NEXT:    [[TMP8:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 8
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, i8* [[TMP8]], i64 0
// CHECK1-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to %struct._globalized_locals_ty*
// CHECK1-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP10]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP11:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT:    store i32 [[TMP12]], i32* [[D]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
// CHECK1-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT:    [[TMP16:%.*]] = bitcast i32* [[D]] to i8*
// CHECK1-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK1-NEXT:    [[TMP17:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, [10 x i32]*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP17]], i64 2)
// CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 3
// CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT:    [[TMP19:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
// CHECK1-NEXT:    call void @__kmpc_restore_team_static_memory(i16 0, i16 [[TMP19]])
// CHECK1-NEXT:    br label [[DOTTERMINATION_NOTIFIER:%.*]]
// CHECK1:       .termination.notifier:
// CHECK1-NEXT:    call void @__kmpc_kernel_deinit(i16 1)
// CHECK1-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK1-NEXT:    br label [[DOTEXIT]]
// CHECK1:       .exit:
// CHECK1-NEXT:    ret void
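// In the kernel entry above, only a single master thread (the first thread of
// the last warp, computed as (ntid-1) & ~(warpsize-1)) survives the
// .mastercheck block; it initializes the runtime, globalizes 'd' in team
// static memory, packs the captured 'b' and 'd' into CAPTURED_VARS_ADDRS, and
// hands the outlined parallel region to the workers via __kmpc_parallel_51
// before executing the sequential 'b[3] += 1' itself.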
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT:    [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT:    store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1:       omp.dispatch.cond:
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1:       omp.dispatch.body:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP12]]
// CHECK1-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1:       omp.dispatch.inc:
// CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK1-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK1-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK1:       omp.dispatch.end:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT:    ret void
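// __omp_outlined__ is the parallel-region body: __kmpc_for_static_init_4 with
// schedule kind 33 (static, chunked) and chunk size 1 distributes the 10
// iterations, the outer omp.dispatch loop advances the lower/upper bounds by
// the stride, and the inner loop performs 'b[i] += d'. The wrapper below
// unpacks 'b' and 'd' from the shared-variables array returned by
// __kmpc_get_shared_variables before calling it.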
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to [10 x i32]**
// CHECK1-NEXT:    [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP4]], align 8
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK1-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK1-NEXT:    call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP5]], i32* [[TMP8]]) #[[ATTR3]]
// CHECK1-NEXT:    ret void
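// The CHECK2 group below mirrors CHECK1; the only difference is that 'd' is
// globalized with __kmpc_data_sharing_push_stack / __kmpc_data_sharing_pop_stack
// instead of team static memory.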
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[WORK_FN:%.*]] = alloca i8*, align 8
// CHECK2-NEXT:    [[EXEC_STATUS:%.*]] = alloca i8, align 1
// CHECK2-NEXT:    store i8* null, i8** [[WORK_FN]], align 8
// CHECK2-NEXT:    store i8 0, i8* [[EXEC_STATUS]], align 1
// CHECK2-NEXT:    br label [[DOTAWAIT_WORK:%.*]]
// CHECK2:       .await.work:
// CHECK2-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK2-NEXT:    [[TMP0:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORK_FN]])
// CHECK2-NEXT:    [[TMP1:%.*]] = zext i1 [[TMP0]] to i8
// CHECK2-NEXT:    store i8 [[TMP1]], i8* [[EXEC_STATUS]], align 1
// CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK2-NEXT:    [[SHOULD_TERMINATE:%.*]] = icmp eq i8* [[TMP2]], null
// CHECK2-NEXT:    br i1 [[SHOULD_TERMINATE]], label [[DOTEXIT:%.*]], label [[DOTSELECT_WORKERS:%.*]]
// CHECK2:       .select.workers:
// CHECK2-NEXT:    [[TMP3:%.*]] = load i8, i8* [[EXEC_STATUS]], align 1
// CHECK2-NEXT:    [[IS_ACTIVE:%.*]] = icmp ne i8 [[TMP3]], 0
// CHECK2-NEXT:    br i1 [[IS_ACTIVE]], label [[DOTEXECUTE_PARALLEL:%.*]], label [[DOTBARRIER_PARALLEL:%.*]]
// CHECK2:       .execute.parallel:
// CHECK2-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT:    [[TMP5:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK2-NEXT:    [[WORK_MATCH:%.*]] = icmp eq i8* [[TMP5]], bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*)
// CHECK2-NEXT:    br i1 [[WORK_MATCH]], label [[DOTEXECUTE_FN:%.*]], label [[DOTCHECK_NEXT:%.*]]
// CHECK2:       .execute.fn:
// CHECK2-NEXT:    call void @__omp_outlined___wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT:    br label [[DOTTERMINATE_PARALLEL:%.*]]
// CHECK2:       .check.next:
// CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP2]] to void (i16, i32)*
// CHECK2-NEXT:    call void [[TMP6]](i16 0, i32 [[TMP4]])
// CHECK2-NEXT:    br label [[DOTTERMINATE_PARALLEL]]
// CHECK2:       .terminate.parallel:
// CHECK2-NEXT:    call void @__kmpc_kernel_end_parallel()
// CHECK2-NEXT:    br label [[DOTBARRIER_PARALLEL]]
// CHECK2:       .barrier.parallel:
// CHECK2-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK2-NEXT:    br label [[DOTAWAIT_WORK]]
// CHECK2:       .exit:
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14
// CHECK2-SAME: (i64 [[N:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT:    [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK2-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK2-NEXT:    [[NVPTX_WARP_SIZE:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK2-NEXT:    [[THREAD_LIMIT:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS]], [[NVPTX_WARP_SIZE]]
// CHECK2-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[NVPTX_TID]], [[THREAD_LIMIT]]
// CHECK2-NEXT:    br i1 [[TMP1]], label [[DOTWORKER:%.*]], label [[DOTMASTERCHECK:%.*]]
// CHECK2:       .worker:
// CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker() #[[ATTR3]]
// CHECK2-NEXT:    br label [[DOTEXIT:%.*]]
// CHECK2:       .mastercheck:
// CHECK2-NEXT:    [[NVPTX_TID1:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK2-NEXT:    [[NVPTX_NUM_THREADS2:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK2-NEXT:    [[NVPTX_WARP_SIZE3:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK2-NEXT:    [[TMP2:%.*]] = sub nuw i32 [[NVPTX_WARP_SIZE3]], 1
// CHECK2-NEXT:    [[TMP3:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS2]], 1
// CHECK2-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP2]], -1
// CHECK2-NEXT:    [[MASTER_TID:%.*]] = and i32 [[TMP3]], [[TMP4]]
// CHECK2-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID1]], [[MASTER_TID]]
// CHECK2-NEXT:    br i1 [[TMP5]], label [[DOTMASTER:%.*]], label [[DOTEXIT]]
// CHECK2:       .master:
// CHECK2-NEXT:    [[NVPTX_NUM_THREADS4:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK2-NEXT:    [[NVPTX_WARP_SIZE5:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK2-NEXT:    [[THREAD_LIMIT6:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS4]], [[NVPTX_WARP_SIZE5]]
// CHECK2-NEXT:    call void @__kmpc_kernel_init(i32 [[THREAD_LIMIT6]], i16 1)
// CHECK2-NEXT:    call void @__kmpc_data_sharing_init_stack()
// CHECK2-NEXT:    [[TMP6:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i64 4, i16 1)
// CHECK2-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to %struct._globalized_locals_ty*
// CHECK2-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP7]], i32 0, i32 0
// CHECK2-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT:    store i32 [[TMP9]], i32* [[D]], align 4
// CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK2-NEXT:    [[TMP11:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
// CHECK2-NEXT:    store i8* [[TMP11]], i8** [[TMP10]], align 8
// CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i32* [[D]] to i8*
// CHECK2-NEXT:    store i8* [[TMP13]], i8** [[TMP12]], align 8
// CHECK2-NEXT:    [[TMP14:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, [10 x i32]*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP14]], i64 2)
// CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 3
// CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK2-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP6]])
// CHECK2-NEXT:    br label [[DOTTERMINATION_NOTIFIER:%.*]]
// CHECK2:       .termination.notifier:
// CHECK2-NEXT:    call void @__kmpc_kernel_deinit(i16 1)
// CHECK2-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK2-NEXT:    br label [[DOTEXIT]]
// CHECK2:       .exit:
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT:    [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT:    store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2:       omp.dispatch.cond:
// CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2:       cond.true:
// CHECK2-NEXT:    br label [[COND_END:%.*]]
// CHECK2:       cond.false:
// CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    br label [[COND_END]]
// CHECK2:       cond.end:
// CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2:       omp.dispatch.body:
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2:       omp.inner.for.cond:
// CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2:       omp.inner.for.body:
// CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP12]]
// CHECK2-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2:       omp.body.continue:
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2:       omp.inner.for.inc:
// CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK2-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK2:       omp.inner.for.end:
// CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2:       omp.dispatch.inc:
// CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK2-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK2-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK2:       omp.dispatch.end:
// CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK2-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to [10 x i32]**
// CHECK2-NEXT:    [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP4]], align 8
// CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK2-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK2-NEXT:    call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP5]], i32* [[TMP8]]) #[[ATTR3]]
// CHECK2-NEXT:    ret void
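// The CHECK group below reflects the current device runtime: the explicit
// worker state machine is folded into __kmpc_target_init (which returns -1 for
// the thread that runs the user code), 'd' is allocated with
// __kmpc_alloc_shared and released with __kmpc_free_shared, and the kernel
// shuts down through __kmpc_target_deinit.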
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l13
// CHECK-SAME: (i64 [[N:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
// CHECK-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
// CHECK-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK:       user_code.entry:
// CHECK-NEXT:    [[D:%.*]] = call i8* @__kmpc_alloc_shared(i64 4)
// CHECK-NEXT:    [[D_ON_STACK:%.*]] = bitcast i8* [[D]] to i32*
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK-NEXT:    store i32 [[TMP3]], i32* [[D_ON_STACK]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-NEXT:    [[TMP5:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
// CHECK-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[D_ON_STACK]] to i8*
// CHECK-NEXT:    store i8* [[TMP7]], i8** [[TMP6]], align 8
// CHECK-NEXT:    [[TMP8:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, [10 x i32]*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP8]], i64 2)
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 3
// CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    call void @__kmpc_free_shared(i8* [[D]], i64 4)
// CHECK-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
// CHECK-NEXT:    ret void
// CHECK:       worker.exit:
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK-NEXT:    store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK:       omp.dispatch.cond:
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// CHECK-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK:       cond.true:
// CHECK-NEXT:    br label [[COND_END:%.*]]
// CHECK:       cond.false:
// CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    br label [[COND_END]]
// CHECK:       cond.end:
// CHECK-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK-NEXT:    br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK:       omp.dispatch.body:
// CHECK-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK:       omp.inner.for.cond:
// CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK:       omp.inner.for.body:
// CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP12]]
// CHECK-NEXT:    store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK:       omp.body.continue:
// CHECK-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK:       omp.inner.for.inc:
// CHECK-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK:       omp.inner.for.end:
// CHECK-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK:       omp.dispatch.inc:
// CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK:       omp.dispatch.end:
// CHECK-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
// CHECK-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to [10 x i32]**
// CHECK-NEXT:    [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP4]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK-NEXT:    call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP5]], i32* [[TMP8]]) #[[ATTR1:[0-9]+]]
// CHECK-NEXT:    ret void
//
