// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// REQUIRES: powerpc-registered-target

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

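// Checks LLVM IR generated for the OpenMP 5.0 'order(concurrent)' clause on a
// combined 'target teams distribute parallel for' construct. The CHECK1 runs
// cover direct codegen and the CHECK2 runs cover codegen through a PCH; the
// -fopenmp-simd runs rely on --implicit-check-not to verify that no
// __kmpc/__tgt runtime calls are emitted in simd-only mode.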
void gtid_test() {
#pragma omp target teams distribute parallel for order(concurrent)
  for (int i = 0; i < 100; i++) {}
}




#endif
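// In the checks below, the host entry pushes the loop trip count and attempts
// the offload via __tgt_target_teams_mapper, falling back to the host version
// of the region on failure. The offload entry forks the teams region; the
// teams-outlined function performs the 'distribute' static scheduling and
// forks the 'parallel for' outlined function, whose loop accesses carry
// !llvm.access.group metadata reflecting order(concurrent).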
// CHECK1-LABEL: define {{[^@]+}}@_Z9gtid_testv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100)
// CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
// CHECK1-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1:       omp_offload.failed:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16() #[[ATTR2:[0-9]+]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK1:       omp_offload.cont:
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16
// CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4
// CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR3:[0-9]+]] section ".text.startup" {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z9gtid_testv
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100)
// CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 0)
// CHECK2-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
// CHECK2-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2:       omp_offload.failed:
// CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16() #[[ATTR2:[0-9]+]]
// CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK2:       omp_offload.cont:
// CHECK2-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16
// CHECK2-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK2-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT:    store i32 99, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
// CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2:       cond.true:
// CHECK2-NEXT:    br label [[COND_END:%.*]]
// CHECK2:       cond.false:
// CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT:    br label [[COND_END]]
// CHECK2:       cond.end:
// CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2:       omp.inner.for.cond:
// CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2:       omp.inner.for.body:
// CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]])
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2:       omp.inner.for.inc:
// CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK2:       omp.inner.for.end:
// CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2:       omp.loop.exit:
// CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK2-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT:    store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT:    store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2:       cond.true:
// CHECK2-NEXT:    br label [[COND_END:%.*]]
// CHECK2:       cond.false:
// CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    br label [[COND_END]]
// CHECK2:       cond.end:
// CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2:       omp.inner.for.cond:
// CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4
// CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2:       omp.inner.for.body:
// CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4
// CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2:       omp.body.continue:
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2:       omp.inner.for.inc:
// CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK2-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK2:       omp.inner.for.end:
// CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2:       omp.loop.exit:
// CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK2-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR3:[0-9]+]] section ".text.startup" {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT:    ret void
//