// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test device global memory data sharing codegen.
///==========================================================================///

// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK

// expected-no-diagnostics

#ifndef HEADER
#define HEADER

void test_ds(){
  #pragma omp target
  {
    int a = 10;
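    // 'a' escapes to the threads of the parallel region below, so device
    // codegen must place it in globalized (team-shared) storage rather than
    // in a thread-private stack slot (see the CHECK lines).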
    #pragma omp parallel
    {
      a = 1000;
    }
    int b = 100;
    int c = 1000;
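    // 'c' is private in the next region and stays in a plain alloca; 'b' and
    // 'a' are captured by reference and handed to the outlined function
    // through the captured-variables array.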
    #pragma omp parallel private(c)
    {
      int *c1 = &c;
      b = a + 10000;
    }
  }
}

#endif
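
// For orientation, the target region above lowers to roughly the following
// pseudo-C under the __kmpc_alloc_shared-based runtime exercised by the CHECK
// block below. This is a sketch: names like target_region/outlined0 are
// illustrative, not the exact emitted symbols.
//
//   void target_region() {
//     int *a = (int *)__kmpc_alloc_shared(4);  // globalized: escapes to workers
//     int *b = (int *)__kmpc_alloc_shared(4);  // globalized: escapes to workers
//     int c;                                   // private: ordinary stack slot
//     *a = 10;
//     __kmpc_parallel_51(/*...*/, outlined0, wrapper0, /*args=*/{a}, /*nargs=*/1);
//     *b = 100;
//     c = 1000;
//     __kmpc_parallel_51(/*...*/, outlined1, wrapper1, /*args=*/{b, a}, /*nargs=*/2);
//     __kmpc_free_shared(b, 4);                // freed in reverse allocation order
//     __kmpc_free_shared(a, 4);
//   }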

// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l15_worker
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[WORK_FN:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    [[EXEC_STATUS:%.*]] = alloca i8, align 1
// CHECK1-NEXT:    store i8* null, i8** [[WORK_FN]], align 8
// CHECK1-NEXT:    store i8 0, i8* [[EXEC_STATUS]], align 1
// CHECK1-NEXT:    br label [[DOTAWAIT_WORK:%.*]]
// CHECK1:       .await.work:
// CHECK1-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK1-NEXT:    [[TMP0:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORK_FN]])
// CHECK1-NEXT:    [[TMP1:%.*]] = zext i1 [[TMP0]] to i8
// CHECK1-NEXT:    store i8 [[TMP1]], i8* [[EXEC_STATUS]], align 1
// CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK1-NEXT:    [[SHOULD_TERMINATE:%.*]] = icmp eq i8* [[TMP2]], null
// CHECK1-NEXT:    br i1 [[SHOULD_TERMINATE]], label [[DOTEXIT:%.*]], label [[DOTSELECT_WORKERS:%.*]]
// CHECK1:       .select.workers:
// CHECK1-NEXT:    [[TMP3:%.*]] = load i8, i8* [[EXEC_STATUS]], align 1
// CHECK1-NEXT:    [[IS_ACTIVE:%.*]] = icmp ne i8 [[TMP3]], 0
// CHECK1-NEXT:    br i1 [[IS_ACTIVE]], label [[DOTEXECUTE_PARALLEL:%.*]], label [[DOTBARRIER_PARALLEL:%.*]]
// CHECK1:       .execute.parallel:
// CHECK1-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT:    [[TMP5:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK1-NEXT:    [[WORK_MATCH:%.*]] = icmp eq i8* [[TMP5]], bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*)
// CHECK1-NEXT:    br i1 [[WORK_MATCH]], label [[DOTEXECUTE_FN:%.*]], label [[DOTCHECK_NEXT:%.*]]
// CHECK1:       .execute.fn:
// CHECK1-NEXT:    call void @__omp_outlined___wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT:    br label [[DOTTERMINATE_PARALLEL:%.*]]
// CHECK1:       .check.next:
// CHECK1-NEXT:    [[TMP6:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK1-NEXT:    [[WORK_MATCH1:%.*]] = icmp eq i8* [[TMP6]], bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*)
// CHECK1-NEXT:    br i1 [[WORK_MATCH1]], label [[DOTEXECUTE_FN2:%.*]], label [[DOTCHECK_NEXT3:%.*]]
// CHECK1:       .execute.fn2:
// CHECK1-NEXT:    call void @__omp_outlined__1_wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3]]
// CHECK1-NEXT:    br label [[DOTTERMINATE_PARALLEL]]
// CHECK1:       .check.next3:
// CHECK1-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP2]] to void (i16, i32)*
// CHECK1-NEXT:    call void [[TMP7]](i16 0, i32 [[TMP4]])
// CHECK1-NEXT:    br label [[DOTTERMINATE_PARALLEL]]
// CHECK1:       .terminate.parallel:
// CHECK1-NEXT:    call void @__kmpc_kernel_end_parallel()
// CHECK1-NEXT:    br label [[DOTBARRIER_PARALLEL]]
// CHECK1:       .barrier.parallel:
// CHECK1-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK1-NEXT:    br label [[DOTAWAIT_WORK]]
// CHECK1:       .exit:
// CHECK1-NEXT:    ret void
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l15
// CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[C:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[CAPTURED_VARS_ADDRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT:    [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK1-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK1-NEXT:    [[NVPTX_WARP_SIZE:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK1-NEXT:    [[THREAD_LIMIT:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS]], [[NVPTX_WARP_SIZE]]
// CHECK1-NEXT:    [[TMP0:%.*]] = icmp ult i32 [[NVPTX_TID]], [[THREAD_LIMIT]]
// CHECK1-NEXT:    br i1 [[TMP0]], label [[DOTWORKER:%.*]], label [[DOTMASTERCHECK:%.*]]
// CHECK1:       .worker:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l15_worker() #[[ATTR3]]
// CHECK1-NEXT:    br label [[DOTEXIT:%.*]]
// CHECK1:       .mastercheck:
// CHECK1-NEXT:    [[NVPTX_TID1:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK1-NEXT:    [[NVPTX_NUM_THREADS2:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK1-NEXT:    [[NVPTX_WARP_SIZE3:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK1-NEXT:    [[TMP1:%.*]] = sub nuw i32 [[NVPTX_WARP_SIZE3]], 1
// CHECK1-NEXT:    [[TMP2:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS2]], 1
// CHECK1-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
// CHECK1-NEXT:    [[MASTER_TID:%.*]] = and i32 [[TMP2]], [[TMP3]]
// CHECK1-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[NVPTX_TID1]], [[MASTER_TID]]
// CHECK1-NEXT:    br i1 [[TMP4]], label [[DOTMASTER:%.*]], label [[DOTEXIT]]
// CHECK1:       .master:
// CHECK1-NEXT:    [[NVPTX_NUM_THREADS4:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK1-NEXT:    [[NVPTX_WARP_SIZE5:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK1-NEXT:    [[THREAD_LIMIT6:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS4]], [[NVPTX_WARP_SIZE5]]
// CHECK1-NEXT:    call void @__kmpc_kernel_init(i32 [[THREAD_LIMIT6]], i16 1)
// CHECK1-NEXT:    call void @__kmpc_data_sharing_init_stack()
// CHECK1-NEXT:    [[TMP5:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
// CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* @"_openmp_static_kernel$size", align 8
// CHECK1-NEXT:    call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i64 [[TMP6]], i16 [[TMP5]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
// CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 8
// CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i8, i8* [[TMP7]], i64 0
// CHECK1-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to %struct._globalized_locals_ty*
// CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP9]], i32 0, i32 0
// CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY]], %struct._globalized_locals_ty* [[TMP9]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT:    store i32 10, i32* [[A]], align 4
// CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i32* [[A]] to i8*
// CHECK1-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK1-NEXT:    [[TMP13:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP13]], i64 1)
// CHECK1-NEXT:    store i32 100, i32* [[B]], align 4
// CHECK1-NEXT:    store i32 1000, i32* [[C]], align 4
// CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS7]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP15:%.*]] = bitcast i32* [[B]] to i8*
// CHECK1-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 8
// CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS7]], i64 0, i64 1
// CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i32* [[A]] to i8*
// CHECK1-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK1-NEXT:    [[TMP18:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS7]] to i8**
// CHECK1-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP18]], i64 2)
// CHECK1-NEXT:    [[TMP19:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
// CHECK1-NEXT:    call void @__kmpc_restore_team_static_memory(i16 0, i16 [[TMP19]])
// CHECK1-NEXT:    br label [[DOTTERMINATION_NOTIFIER:%.*]]
// CHECK1:       .termination.notifier:
// CHECK1-NEXT:    call void @__kmpc_kernel_deinit(i16 1)
// CHECK1-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK1-NEXT:    br label [[DOTEXIT]]
// CHECK1:       .exit:
// CHECK1-NEXT:    ret void
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT:    store i32 1000, i32* [[TMP0]], align 4
// CHECK1-NEXT:    ret void
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK1-NEXT:    call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR3]]
// CHECK1-NEXT:    ret void
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[C:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[C1:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT:    store i32* [[C]], i32** [[C1]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP2]], 10000
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT:    ret void
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK1-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK1-NEXT:    call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]], i32* [[TMP8]]) #[[ATTR3]]
// CHECK1-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l15_worker
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[WORK_FN:%.*]] = alloca i8*, align 8
// CHECK2-NEXT:    [[EXEC_STATUS:%.*]] = alloca i8, align 1
// CHECK2-NEXT:    store i8* null, i8** [[WORK_FN]], align 8
// CHECK2-NEXT:    store i8 0, i8* [[EXEC_STATUS]], align 1
// CHECK2-NEXT:    br label [[DOTAWAIT_WORK:%.*]]
// CHECK2:       .await.work:
// CHECK2-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK2-NEXT:    [[TMP0:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORK_FN]])
// CHECK2-NEXT:    [[TMP1:%.*]] = zext i1 [[TMP0]] to i8
// CHECK2-NEXT:    store i8 [[TMP1]], i8* [[EXEC_STATUS]], align 1
// CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK2-NEXT:    [[SHOULD_TERMINATE:%.*]] = icmp eq i8* [[TMP2]], null
// CHECK2-NEXT:    br i1 [[SHOULD_TERMINATE]], label [[DOTEXIT:%.*]], label [[DOTSELECT_WORKERS:%.*]]
// CHECK2:       .select.workers:
// CHECK2-NEXT:    [[TMP3:%.*]] = load i8, i8* [[EXEC_STATUS]], align 1
// CHECK2-NEXT:    [[IS_ACTIVE:%.*]] = icmp ne i8 [[TMP3]], 0
// CHECK2-NEXT:    br i1 [[IS_ACTIVE]], label [[DOTEXECUTE_PARALLEL:%.*]], label [[DOTBARRIER_PARALLEL:%.*]]
// CHECK2:       .execute.parallel:
// CHECK2-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK2-NEXT:    [[TMP5:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK2-NEXT:    [[WORK_MATCH:%.*]] = icmp eq i8* [[TMP5]], bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*)
// CHECK2-NEXT:    br i1 [[WORK_MATCH]], label [[DOTEXECUTE_FN:%.*]], label [[DOTCHECK_NEXT:%.*]]
// CHECK2:       .execute.fn:
// CHECK2-NEXT:    call void @__omp_outlined___wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT:    br label [[DOTTERMINATE_PARALLEL:%.*]]
// CHECK2:       .check.next:
// CHECK2-NEXT:    [[TMP6:%.*]] = load i8*, i8** [[WORK_FN]], align 8
// CHECK2-NEXT:    [[WORK_MATCH1:%.*]] = icmp eq i8* [[TMP6]], bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*)
// CHECK2-NEXT:    br i1 [[WORK_MATCH1]], label [[DOTEXECUTE_FN2:%.*]], label [[DOTCHECK_NEXT3:%.*]]
// CHECK2:       .execute.fn2:
// CHECK2-NEXT:    call void @__omp_outlined__1_wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3]]
// CHECK2-NEXT:    br label [[DOTTERMINATE_PARALLEL]]
// CHECK2:       .check.next3:
// CHECK2-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP2]] to void (i16, i32)*
// CHECK2-NEXT:    call void [[TMP7]](i16 0, i32 [[TMP4]])
// CHECK2-NEXT:    br label [[DOTTERMINATE_PARALLEL]]
// CHECK2:       .terminate.parallel:
// CHECK2-NEXT:    call void @__kmpc_kernel_end_parallel()
// CHECK2-NEXT:    br label [[DOTBARRIER_PARALLEL]]
// CHECK2:       .barrier.parallel:
// CHECK2-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK2-NEXT:    br label [[DOTAWAIT_WORK]]
// CHECK2:       .exit:
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l15
// CHECK2-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT:    [[C:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[CAPTURED_VARS_ADDRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT:    [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK2-NEXT:    [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK2-NEXT:    [[NVPTX_WARP_SIZE:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK2-NEXT:    [[THREAD_LIMIT:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS]], [[NVPTX_WARP_SIZE]]
// CHECK2-NEXT:    [[TMP0:%.*]] = icmp ult i32 [[NVPTX_TID]], [[THREAD_LIMIT]]
// CHECK2-NEXT:    br i1 [[TMP0]], label [[DOTWORKER:%.*]], label [[DOTMASTERCHECK:%.*]]
// CHECK2:       .worker:
// CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l15_worker() #[[ATTR3]]
// CHECK2-NEXT:    br label [[DOTEXIT:%.*]]
// CHECK2:       .mastercheck:
// CHECK2-NEXT:    [[NVPTX_TID1:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK2-NEXT:    [[NVPTX_NUM_THREADS2:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK2-NEXT:    [[NVPTX_WARP_SIZE3:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK2-NEXT:    [[TMP1:%.*]] = sub nuw i32 [[NVPTX_WARP_SIZE3]], 1
// CHECK2-NEXT:    [[TMP2:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS2]], 1
// CHECK2-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
// CHECK2-NEXT:    [[MASTER_TID:%.*]] = and i32 [[TMP2]], [[TMP3]]
// CHECK2-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[NVPTX_TID1]], [[MASTER_TID]]
// CHECK2-NEXT:    br i1 [[TMP4]], label [[DOTMASTER:%.*]], label [[DOTEXIT]]
// CHECK2:       .master:
// CHECK2-NEXT:    [[NVPTX_NUM_THREADS4:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK2-NEXT:    [[NVPTX_WARP_SIZE5:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK2-NEXT:    [[THREAD_LIMIT6:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS4]], [[NVPTX_WARP_SIZE5]]
// CHECK2-NEXT:    call void @__kmpc_kernel_init(i32 [[THREAD_LIMIT6]], i16 1)
// CHECK2-NEXT:    call void @__kmpc_data_sharing_init_stack()
// CHECK2-NEXT:    [[TMP5:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i64 8, i16 1)
// CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty*
// CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 0
// CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 1
// CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT:    store i32 10, i32* [[A]], align 4
// CHECK2-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK2-NEXT:    [[TMP9:%.*]] = bitcast i32* [[A]] to i8*
// CHECK2-NEXT:    store i8* [[TMP9]], i8** [[TMP8]], align 8
// CHECK2-NEXT:    [[TMP10:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP10]], i64 1)
// CHECK2-NEXT:    store i32 100, i32* [[B]], align 4
// CHECK2-NEXT:    store i32 1000, i32* [[C]], align 4
// CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS7]], i64 0, i64 0
// CHECK2-NEXT:    [[TMP12:%.*]] = bitcast i32* [[B]] to i8*
// CHECK2-NEXT:    store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS7]], i64 0, i64 1
// CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i32* [[A]] to i8*
// CHECK2-NEXT:    store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK2-NEXT:    [[TMP15:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS7]] to i8**
// CHECK2-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP15]], i64 2)
// CHECK2-NEXT:    call void @__kmpc_data_sharing_pop_stack(i8* [[TMP5]])
// CHECK2-NEXT:    br label [[DOTTERMINATION_NOTIFIER:%.*]]
// CHECK2:       .termination.notifier:
// CHECK2-NEXT:    call void @__kmpc_kernel_deinit(i16 1)
// CHECK2-NEXT:    call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK2-NEXT:    br label [[DOTEXIT]]
// CHECK2:       .exit:
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK2-NEXT:    store i32 1000, i32* [[TMP0]], align 4
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK2-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK2-NEXT:    call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR3]]
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[C:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[C1:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK2-NEXT:    store i32* [[C]], i32** [[C1]], align 8
// CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP2]], 10000
// CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK2-NEXT:    ret void
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK2-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK2-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK2-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK2-NEXT:    call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]], i32* [[TMP8]]) #[[ATTR3]]
// CHECK2-NEXT:    ret void
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l14
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT:    [[C:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [2 x i8*], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
// CHECK-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK:       user_code.entry:
// CHECK-NEXT:    [[A:%.*]] = call i8* @__kmpc_alloc_shared(i64 4)
// CHECK-NEXT:    [[A_ON_STACK:%.*]] = bitcast i8* [[A]] to i32*
// CHECK-NEXT:    [[B:%.*]] = call i8* @__kmpc_alloc_shared(i64 4)
// CHECK-NEXT:    [[B_ON_STACK:%.*]] = bitcast i8* [[B]] to i32*
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT:    store i32 10, i32* [[A_ON_STACK]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK-NEXT:    store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP4]], i64 1)
// CHECK-NEXT:    store i32 100, i32* [[B_ON_STACK]], align 4
// CHECK-NEXT:    store i32 1000, i32* [[C]], align 4
// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS1]], i64 0, i64 0
// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[B_ON_STACK]] to i8*
// CHECK-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS1]], i64 0, i64 1
// CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK-NEXT:    store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK-NEXT:    [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
// CHECK-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP9]], i64 2)
// CHECK-NEXT:    call void @__kmpc_free_shared(i8* [[B]], i64 4)
// CHECK-NEXT:    call void @__kmpc_free_shared(i8* [[A]], i64 4)
// CHECK-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
// CHECK-NEXT:    ret void
// CHECK:       worker.exit:
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    store i32 1000, i32* [[TMP0]], align 4
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
// CHECK-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK-NEXT:    call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR1:[0-9]+]]
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[C:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[C1:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    store i32* [[C]], i32** [[C1]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP2]], 10000
// CHECK-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT:    store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT:    store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT:    store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK-NEXT:    call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]], i32* [[TMP8]]) #[[ATTR1]]
// CHECK-NEXT:    ret void
//