1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
2 // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
3 // expected-no-diagnostics
4 
5 // REQUIRES: x86-registered-target
6 
7 // TODO: The unroll-factor heuristic might be able to use the information that the trip count is constant, but currently is not able to determine that.
8 
9 #ifndef HEADER
10 #define HEADER
11 
12 double sind(double);
13 
14 // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_constant_for(
15 // CHECK-NEXT:  [[ENTRY:.*]]:
16 // CHECK-NEXT:    %[[A_ADDR:.+]] = alloca float*, align 8
17 // CHECK-NEXT:    %[[B_ADDR:.+]] = alloca float*, align 8
18 // CHECK-NEXT:    %[[C_ADDR:.+]] = alloca float*, align 8
19 // CHECK-NEXT:    %[[D_ADDR:.+]] = alloca float*, align 8
20 // CHECK-NEXT:    %[[E_ADDR:.+]] = alloca float*, align 8
21 // CHECK-NEXT:    %[[OFFSET_ADDR:.+]] = alloca float, align 4
22 // CHECK-NEXT:    %[[I:.+]] = alloca i32, align 4
23 // CHECK-NEXT:    %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
24 // CHECK-NEXT:    %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
25 // CHECK-NEXT:    %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
26 // CHECK-NEXT:    %[[P_LASTITER:.+]] = alloca i32, align 4
27 // CHECK-NEXT:    %[[P_LOWERBOUND:.+]] = alloca i32, align 4
28 // CHECK-NEXT:    %[[P_UPPERBOUND:.+]] = alloca i32, align 4
29 // CHECK-NEXT:    %[[P_STRIDE:.+]] = alloca i32, align 4
30 // CHECK-NEXT:    store float* %[[A:.+]], float** %[[A_ADDR]], align 8
31 // CHECK-NEXT:    store float* %[[B:.+]], float** %[[B_ADDR]], align 8
32 // CHECK-NEXT:    store float* %[[C:.+]], float** %[[C_ADDR]], align 8
33 // CHECK-NEXT:    store float* %[[D:.+]], float** %[[D_ADDR]], align 8
34 // CHECK-NEXT:    store float* %[[E:.+]], float** %[[E_ADDR]], align 8
35 // CHECK-NEXT:    store float %[[OFFSET:.+]], float* %[[OFFSET_ADDR]], align 4
36 // CHECK-NEXT:    store i32 0, i32* %[[I]], align 4
37 // CHECK-NEXT:    %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
38 // CHECK-NEXT:    store i32* %[[I]], i32** %[[TMP0]], align 8
39 // CHECK-NEXT:    %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
40 // CHECK-NEXT:    %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
41 // CHECK-NEXT:    store i32 %[[TMP2]], i32* %[[TMP1]], align 4
42 // CHECK-NEXT:    call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
43 // CHECK-NEXT:    %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
44 // CHECK-NEXT:    br label %[[OMP_LOOP_PREHEADER:.+]]
45 // CHECK-EMPTY:
46 // CHECK-NEXT:  [[OMP_LOOP_PREHEADER]]:
47 // CHECK-NEXT:    %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 4
48 // CHECK-NEXT:    %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 4
49 // CHECK-NEXT:    %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
50 // CHECK-NEXT:    %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
51 // CHECK-NEXT:    %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
52 // CHECK-NEXT:    br label %[[OMP_FLOOR0_PREHEADER:.+]]
53 // CHECK-EMPTY:
54 // CHECK-NEXT:  [[OMP_FLOOR0_PREHEADER]]:
55 // CHECK-NEXT:    store i32 0, i32* %[[P_LOWERBOUND]], align 4
56 // CHECK-NEXT:    %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
57 // CHECK-NEXT:    store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4
58 // CHECK-NEXT:    store i32 1, i32* %[[P_STRIDE]], align 4
59 // CHECK-NEXT:    %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
60 // CHECK-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1)
61 // CHECK-NEXT:    %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
62 // CHECK-NEXT:    %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
63 // CHECK-NEXT:    %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]]
64 // CHECK-NEXT:    %[[TMP11:.+]] = add i32 %[[TMP10]], 1
65 // CHECK-NEXT:    br label %[[OMP_FLOOR0_HEADER:.+]]
66 // CHECK-EMPTY:
67 // CHECK-NEXT:  [[OMP_FLOOR0_HEADER]]:
68 // CHECK-NEXT:    %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
69 // CHECK-NEXT:    br label %[[OMP_FLOOR0_COND:.+]]
70 // CHECK-EMPTY:
71 // CHECK-NEXT:  [[OMP_FLOOR0_COND]]:
72 // CHECK-NEXT:    %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]]
73 // CHECK-NEXT:    br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
74 // CHECK-EMPTY:
75 // CHECK-NEXT:  [[OMP_FLOOR0_BODY]]:
76 // CHECK-NEXT:    %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]]
77 // CHECK-NEXT:    %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]]
78 // CHECK-NEXT:    %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 4
79 // CHECK-NEXT:    br label %[[OMP_TILE0_PREHEADER:.+]]
80 // CHECK-EMPTY:
81 // CHECK-NEXT:  [[OMP_TILE0_PREHEADER]]:
82 // CHECK-NEXT:    br label %[[OMP_TILE0_HEADER:.+]]
83 // CHECK-EMPTY:
84 // CHECK-NEXT:  [[OMP_TILE0_HEADER]]:
85 // CHECK-NEXT:    %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
86 // CHECK-NEXT:    br label %[[OMP_TILE0_COND:.+]]
87 // CHECK-EMPTY:
88 // CHECK-NEXT:  [[OMP_TILE0_COND]]:
89 // CHECK-NEXT:    %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]]
90 // CHECK-NEXT:    br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
91 // CHECK-EMPTY:
92 // CHECK-NEXT:  [[OMP_TILE0_BODY]]:
93 // CHECK-NEXT:    %[[TMP15:.+]] = mul nuw i32 4, %[[TMP12]]
94 // CHECK-NEXT:    %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]]
95 // CHECK-NEXT:    br label %[[OMP_LOOP_BODY:.+]]
96 // CHECK-EMPTY:
97 // CHECK-NEXT:  [[OMP_LOOP_BODY]]:
98 // CHECK-NEXT:    call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]])
99 // CHECK-NEXT:    %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8
100 // CHECK-NEXT:    %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
101 // CHECK-NEXT:    %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64
102 // CHECK-NEXT:    %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]]
103 // CHECK-NEXT:    %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4
104 // CHECK-NEXT:    %[[CONV:.+]] = fpext float %[[TMP19]] to double
105 // CHECK-NEXT:    %[[CALL:.+]] = call double @sind(double %[[CONV]])
106 // CHECK-NEXT:    %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8
107 // CHECK-NEXT:    %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
108 // CHECK-NEXT:    %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64
109 // CHECK-NEXT:    %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]]
110 // CHECK-NEXT:    %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4
111 // CHECK-NEXT:    %[[CONV4:.+]] = fpext float %[[TMP22]] to double
112 // CHECK-NEXT:    %[[MUL:.+]] = fmul double %[[CALL]], %[[CONV4]]
113 // CHECK-NEXT:    %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8
114 // CHECK-NEXT:    %[[TMP24:.+]] = load i32, i32* %[[I]], align 4
115 // CHECK-NEXT:    %[[IDXPROM5:.+]] = sext i32 %[[TMP24]] to i64
116 // CHECK-NEXT:    %[[ARRAYIDX6:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM5]]
117 // CHECK-NEXT:    %[[TMP25:.+]] = load float, float* %[[ARRAYIDX6]], align 4
118 // CHECK-NEXT:    %[[CONV7:.+]] = fpext float %[[TMP25]] to double
119 // CHECK-NEXT:    %[[MUL8:.+]] = fmul double %[[MUL]], %[[CONV7]]
120 // CHECK-NEXT:    %[[TMP26:.+]] = load float*, float** %[[E_ADDR]], align 8
121 // CHECK-NEXT:    %[[TMP27:.+]] = load i32, i32* %[[I]], align 4
122 // CHECK-NEXT:    %[[IDXPROM9:.+]] = sext i32 %[[TMP27]] to i64
123 // CHECK-NEXT:    %[[ARRAYIDX10:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM9]]
124 // CHECK-NEXT:    %[[TMP28:.+]] = load float, float* %[[ARRAYIDX10]], align 4
125 // CHECK-NEXT:    %[[CONV11:.+]] = fpext float %[[TMP28]] to double
126 // CHECK-NEXT:    %[[MUL12:.+]] = fmul double %[[MUL8]], %[[CONV11]]
127 // CHECK-NEXT:    %[[TMP29:.+]] = load float, float* %[[OFFSET_ADDR]], align 4
128 // CHECK-NEXT:    %[[CONV13:.+]] = fpext float %[[TMP29]] to double
129 // CHECK-NEXT:    %[[ADD:.+]] = fadd double %[[MUL12]], %[[CONV13]]
130 // CHECK-NEXT:    %[[TMP30:.+]] = load float*, float** %[[A_ADDR]], align 8
131 // CHECK-NEXT:    %[[TMP31:.+]] = load i32, i32* %[[I]], align 4
132 // CHECK-NEXT:    %[[IDXPROM14:.+]] = sext i32 %[[TMP31]] to i64
133 // CHECK-NEXT:    %[[ARRAYIDX15:.+]] = getelementptr inbounds float, float* %[[TMP30]], i64 %[[IDXPROM14]]
134 // CHECK-NEXT:    %[[TMP32:.+]] = load float, float* %[[ARRAYIDX15]], align 4
135 // CHECK-NEXT:    %[[CONV16:.+]] = fpext float %[[TMP32]] to double
136 // CHECK-NEXT:    %[[ADD17:.+]] = fadd double %[[CONV16]], %[[ADD]]
137 // CHECK-NEXT:    %[[CONV18:.+]] = fptrunc double %[[ADD17]] to float
138 // CHECK-NEXT:    store float %[[CONV18]], float* %[[ARRAYIDX15]], align 4
139 // CHECK-NEXT:    br label %[[OMP_TILE0_INC]]
140 // CHECK-EMPTY:
141 // CHECK-NEXT:  [[OMP_TILE0_INC]]:
142 // CHECK-NEXT:    %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
143 // CHECK-NEXT:    br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
144 // CHECK-EMPTY:
145 // CHECK-NEXT:  [[OMP_TILE0_EXIT]]:
146 // CHECK-NEXT:    br label %[[OMP_TILE0_AFTER:.+]]
147 // CHECK-EMPTY:
148 // CHECK-NEXT:  [[OMP_TILE0_AFTER]]:
149 // CHECK-NEXT:    br label %[[OMP_FLOOR0_INC]]
150 // CHECK-EMPTY:
151 // CHECK-NEXT:  [[OMP_FLOOR0_INC]]:
152 // CHECK-NEXT:    %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
153 // CHECK-NEXT:    br label %[[OMP_FLOOR0_HEADER]]
154 // CHECK-EMPTY:
155 // CHECK-NEXT:  [[OMP_FLOOR0_EXIT]]:
156 // CHECK-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
157 // CHECK-NEXT:    %[[OMP_GLOBAL_THREAD_NUM19:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
158 // CHECK-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM19]])
159 // CHECK-NEXT:    br label %[[OMP_FLOOR0_AFTER:.+]]
160 // CHECK-EMPTY:
161 // CHECK-NEXT:  [[OMP_FLOOR0_AFTER]]:
162 // CHECK-NEXT:    br label %[[OMP_LOOP_AFTER:.+]]
163 // CHECK-EMPTY:
164 // CHECK-NEXT:  [[OMP_LOOP_AFTER]]:
165 // CHECK-NEXT:    ret void
166 // CHECK-NEXT:  }
167 
// Worksharing loop combined with a heuristic partial unroll: the '#pragma omp
// unroll partial' (no explicit factor) lets the OpenMPIRBuilder pick the
// factor (4, per the CHECK lines above), and the resulting "floor" loop is
// distributed via '#pragma omp for'. The 128-iteration constant trip count is
// what the TODO at the top of the file refers to. Do not alter the pragmas or
// loop bounds: the autogenerated CHECK block pins this exact IR shape.
void unroll_partial_heuristic_constant_for(float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for
#pragma omp unroll partial
  for (int i = 0; i < 128; i++) {
    a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
  }
}
175 
176 #endif // HEADER
177 
178 // CHECK-LABEL: define {{.*}}@__captured_stmt(
179 // CHECK-NEXT:  [[ENTRY:.*]]:
180 // CHECK-NEXT:    %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
181 // CHECK-NEXT:    %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
182 // CHECK-NEXT:    %[[DOTSTART:.+]] = alloca i32, align 4
183 // CHECK-NEXT:    %[[DOTSTOP:.+]] = alloca i32, align 4
184 // CHECK-NEXT:    %[[DOTSTEP:.+]] = alloca i32, align 4
185 // CHECK-NEXT:    store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
186 // CHECK-NEXT:    store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
187 // CHECK-NEXT:    %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
188 // CHECK-NEXT:    %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
189 // CHECK-NEXT:    %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
190 // CHECK-NEXT:    %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
191 // CHECK-NEXT:    store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
192 // CHECK-NEXT:    store i32 128, i32* %[[DOTSTOP]], align 4
193 // CHECK-NEXT:    store i32 1, i32* %[[DOTSTEP]], align 4
194 // CHECK-NEXT:    %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
195 // CHECK-NEXT:    %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
196 // CHECK-NEXT:    %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
197 // CHECK-NEXT:    br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
198 // CHECK-EMPTY:
199 // CHECK-NEXT:  [[COND_TRUE]]:
200 // CHECK-NEXT:    %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
201 // CHECK-NEXT:    %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
202 // CHECK-NEXT:    %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
203 // CHECK-NEXT:    %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
204 // CHECK-NEXT:    %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP8]]
205 // CHECK-NEXT:    br label %[[COND_END:.+]]
206 // CHECK-EMPTY:
207 // CHECK-NEXT:  [[COND_FALSE]]:
208 // CHECK-NEXT:    br label %[[COND_END]]
209 // CHECK-EMPTY:
210 // CHECK-NEXT:  [[COND_END]]:
211 // CHECK-NEXT:    %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
212 // CHECK-NEXT:    %[[TMP9:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
213 // CHECK-NEXT:    store i32 %[[COND]], i32* %[[TMP9]], align 4
214 // CHECK-NEXT:    ret void
215 // CHECK-NEXT:  }
216 
217 
218 // CHECK-LABEL: define {{.*}}@__captured_stmt.1(
219 // CHECK-NEXT:  [[ENTRY:.*]]:
220 // CHECK-NEXT:    %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
221 // CHECK-NEXT:    %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
222 // CHECK-NEXT:    %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
223 // CHECK-NEXT:    store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
224 // CHECK-NEXT:    store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
225 // CHECK-NEXT:    store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
226 // CHECK-NEXT:    %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
227 // CHECK-NEXT:    %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
228 // CHECK-NEXT:    %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
229 // CHECK-NEXT:    %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
230 // CHECK-NEXT:    %[[MUL:.+]] = mul i32 1, %[[TMP3]]
231 // CHECK-NEXT:    %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
232 // CHECK-NEXT:    %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
233 // CHECK-NEXT:    store i32 %[[ADD]], i32* %[[TMP4]], align 4
234 // CHECK-NEXT:    ret void
235 // CHECK-NEXT:  }
236 
237 
238 // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
239 // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
240 // CHECK: ![[META2:[0-9]+]] =
241 // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
242 // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
243 // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
244