; RUN: llc -march=hexagon < %s | FileCheck %s
; This test is no longer connected to HRC.

target triple = "hexagon"

%s.0 = type { %s.1*, %s.2*, %s.3*, i16*, i32*, i8, i8, i8, i8, i8, i8, i16, i16, i16, i32, i32, i32, i32, i16, i8, i8, i8, i8, float, float, float, float, float, float, float, float, float, float, float, [4 x %s.7], [4 x %s.7], [20 x %s.7], [104 x %s.7], [20 x i32], [257 x %s.8], %s.9 }
%s.1 = type { i16, i8, i16, i8, i8, i8, i8, i8 }
%s.2 = type { i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, [20 x i16], i8, i16 }
%s.3 = type { i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i32, i32, i32, [2 x [2 x i32]], %s.4 }
%s.4 = type { %s.5, [976 x i8] }
%s.5 = type { %s.6 }
%s.6 = type { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 }
%s.7 = type { i64 }
%s.8 = type { i32, i32 }
%s.9 = type { %s.10, [1960 x i8] }
%s.10 = type { i64, i64, i64, i64, i64, i64, i64, [104 x %s.11], [104 x float] }
%s.11 = type { i64, i64 }
%s.12 = type { float, float }

; CHECK: .type   f0,@function
; The allocframe argument value may change, but it should typically remain
; in the 250-280 range. This test was introduced to verify a change that
; reduced stack usage from around 568 bytes to 280 bytes.
; After r308350 the stack size is ~300.
; CHECK: allocframe(r29,#304):raw
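; Overview (inferred from the IR below): f0 converts four (i64, i64) pairs
; reached through %a1 into scaled doubles, forms quadratic combinations of
; them, and then either records a status byte through %a0 (block b1) or
; writes sixteen (re, im) float pairs through %a2 (block b2).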
define void @f0(%s.0* %a0, %s.11* %a1, %s.12* %a2) #0 {
b0:
  %v0 = alloca %s.0*, align 4
  %v1 = alloca %s.11*, align 4
  %v2 = alloca %s.12*, align 4
  %v3 = alloca float, align 4
  %v4 = alloca float, align 4
  %v5 = alloca float, align 4
  %v6 = alloca float, align 4
  %v7 = alloca float, align 4
  %v8 = alloca float, align 4
  %v9 = alloca float, align 4
  %v10 = alloca float, align 4
  %v11 = alloca float, align 4
  %v12 = alloca float, align 4
  %v13 = alloca double, align 8
  %v14 = alloca double, align 8
  %v15 = alloca double, align 8
  %v16 = alloca double, align 8
  %v17 = alloca double, align 8
  %v18 = alloca double, align 8
  %v19 = alloca double, align 8
  %v20 = alloca double, align 8
  %v21 = alloca double, align 8
  %v22 = alloca double, align 8
  %v23 = alloca double, align 8
  %v24 = alloca double, align 8
  %v25 = alloca double, align 8
  %v26 = alloca double, align 8
  %v27 = alloca double, align 8
  %v28 = alloca double, align 8
  %v29 = alloca double, align 8
  %v30 = alloca double, align 8
  %v31 = alloca double, align 8
  %v32 = alloca double, align 8
  %v33 = alloca double, align 8
  store %s.0* %a0, %s.0** %v0, align 4
  store %s.11* %a1, %s.11** %v1, align 4
  store %s.12* %a2, %s.12** %v2, align 4
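  ; Load each i64 field of %a1's elements 0-3, convert it to double, and
  ; scale it by %v32 (initialized to 1.0), producing %v13-%v19.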
  store double 1.000000e+00, double* %v32, align 8
  %v34 = load %s.11*, %s.11** %v1, align 4
  %v35 = getelementptr inbounds %s.11, %s.11* %v34, i32 0
  %v36 = getelementptr inbounds %s.11, %s.11* %v35, i32 0, i32 0
  %v37 = load i64, i64* %v36, align 8
  %v38 = sitofp i64 %v37 to double
  %v39 = load double, double* %v32, align 8
  %v40 = fmul double %v38, %v39
  store double %v40, double* %v13, align 8
  %v41 = load %s.11*, %s.11** %v1, align 4
  %v42 = getelementptr inbounds %s.11, %s.11* %v41, i32 1
  %v43 = getelementptr inbounds %s.11, %s.11* %v42, i32 0, i32 0
  %v44 = load i64, i64* %v43, align 8
  %v45 = sitofp i64 %v44 to double
  %v46 = load double, double* %v32, align 8
  %v47 = fmul double %v45, %v46
  store double %v47, double* %v14, align 8
  %v48 = load %s.11*, %s.11** %v1, align 4
  %v49 = getelementptr inbounds %s.11, %s.11* %v48, i32 1
  %v50 = getelementptr inbounds %s.11, %s.11* %v49, i32 0, i32 1
  %v51 = load i64, i64* %v50, align 8
  %v52 = sitofp i64 %v51 to double
  %v53 = load double, double* %v32, align 8
  %v54 = fmul double %v52, %v53
  store double %v54, double* %v15, align 8
  %v55 = load %s.11*, %s.11** %v1, align 4
  %v56 = getelementptr inbounds %s.11, %s.11* %v55, i32 2
  %v57 = getelementptr inbounds %s.11, %s.11* %v56, i32 0, i32 0
  %v58 = load i64, i64* %v57, align 8
  %v59 = sitofp i64 %v58 to double
  %v60 = load double, double* %v32, align 8
  %v61 = fmul double %v59, %v60
  store double %v61, double* %v16, align 8
  %v62 = load %s.11*, %s.11** %v1, align 4
  %v63 = getelementptr inbounds %s.11, %s.11* %v62, i32 2
  %v64 = getelementptr inbounds %s.11, %s.11* %v63, i32 0, i32 1
  %v65 = load i64, i64* %v64, align 8
  %v66 = sitofp i64 %v65 to double
  %v67 = load double, double* %v32, align 8
  %v68 = fmul double %v66, %v67
  store double %v68, double* %v17, align 8
  %v69 = load %s.11*, %s.11** %v1, align 4
  %v70 = getelementptr inbounds %s.11, %s.11* %v69, i32 3
  %v71 = getelementptr inbounds %s.11, %s.11* %v70, i32 0, i32 0
  %v72 = load i64, i64* %v71, align 8
  %v73 = sitofp i64 %v72 to double
  %v74 = load double, double* %v32, align 8
  %v75 = fmul double %v73, %v74
  store double %v75, double* %v18, align 8
  %v76 = load %s.11*, %s.11** %v1, align 4
  %v77 = getelementptr inbounds %s.11, %s.11* %v76, i32 3
  %v78 = getelementptr inbounds %s.11, %s.11* %v77, i32 0, i32 1
  %v79 = load i64, i64* %v78, align 8
  %v80 = sitofp i64 %v79 to double
  %v81 = load double, double* %v32, align 8
  %v82 = fmul double %v80, %v81
  store double %v82, double* %v19, align 8
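  ; Combine the scaled values %v13-%v19 into the quadratic terms %v20-%v30,
  ; then fold products of those terms into the accumulator %v31.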
  %v83 = load double, double* %v13, align 8
  %v84 = load double, double* %v13, align 8
  %v85 = fmul double %v83, %v84
  %v86 = load double, double* %v14, align 8
  %v87 = load double, double* %v14, align 8
  %v88 = fmul double %v86, %v87
  %v89 = fsub double %v85, %v88
  %v90 = load double, double* %v15, align 8
  %v91 = load double, double* %v15, align 8
  %v92 = fmul double %v90, %v91
  %v93 = fsub double %v89, %v92
  store double %v93, double* %v20, align 8
  %v94 = load double, double* %v13, align 8
  %v95 = load double, double* %v14, align 8
  %v96 = fmul double %v94, %v95
  %v97 = load double, double* %v16, align 8
  %v98 = load double, double* %v14, align 8
  %v99 = fmul double %v97, %v98
  %v100 = fsub double %v96, %v99
  %v101 = load double, double* %v17, align 8
  %v102 = load double, double* %v15, align 8
  %v103 = fmul double %v101, %v102
  %v104 = fsub double %v100, %v103
  store double %v104, double* %v21, align 8
  %v105 = load double, double* %v13, align 8
  %v106 = load double, double* %v15, align 8
  %v107 = fmul double %v105, %v106
  %v108 = load double, double* %v16, align 8
  %v109 = load double, double* %v15, align 8
  %v110 = fmul double %v108, %v109
  %v111 = fadd double %v107, %v110
  %v112 = load double, double* %v17, align 8
  %v113 = load double, double* %v14, align 8
  %v114 = fmul double %v112, %v113
  %v115 = fsub double %v111, %v114
  store double %v115, double* %v22, align 8
  %v116 = load double, double* %v13, align 8
  %v117 = load double, double* %v16, align 8
  %v118 = fmul double %v116, %v117
  %v119 = load double, double* %v18, align 8
  %v120 = load double, double* %v14, align 8
  %v121 = fmul double %v119, %v120
  %v122 = fsub double %v118, %v121
  %v123 = load double, double* %v19, align 8
  %v124 = load double, double* %v15, align 8
  %v125 = fmul double %v123, %v124
  %v126 = fsub double %v122, %v125
  store double %v126, double* %v23, align 8
  %v127 = load double, double* %v13, align 8
  %v128 = load double, double* %v17, align 8
  %v129 = fmul double %v127, %v128
  %v130 = load double, double* %v18, align 8
  %v131 = load double, double* %v15, align 8
  %v132 = fmul double %v130, %v131
  %v133 = fadd double %v129, %v132
  %v134 = load double, double* %v19, align 8
  %v135 = load double, double* %v14, align 8
  %v136 = fmul double %v134, %v135
  %v137 = fsub double %v133, %v136
  store double %v137, double* %v24, align 8
  %v138 = load double, double* %v14, align 8
  %v139 = load double, double* %v14, align 8
  %v140 = fmul double %v138, %v139
  %v141 = load double, double* %v15, align 8
  %v142 = load double, double* %v15, align 8
  %v143 = fmul double %v141, %v142
  %v144 = fsub double %v140, %v143
  %v145 = load double, double* %v16, align 8
  %v146 = load double, double* %v13, align 8
  %v147 = fmul double %v145, %v146
  %v148 = fsub double %v144, %v147
  store double %v148, double* %v25, align 8
  %v149 = load double, double* %v14, align 8
  %v150 = load double, double* %v15, align 8
  %v151 = fmul double %v149, %v150
  %v152 = fmul double %v151, 2.000000e+00
  %v153 = load double, double* %v17, align 8
  %v154 = load double, double* %v13, align 8
  %v155 = fmul double %v153, %v154
  %v156 = fsub double %v152, %v155
  store double %v156, double* %v26, align 8
  %v157 = load double, double* %v14, align 8
  %v158 = load double, double* %v16, align 8
  %v159 = fmul double %v157, %v158
  %v160 = load double, double* %v15, align 8
  %v161 = load double, double* %v17, align 8
  %v162 = fmul double %v160, %v161
  %v163 = fsub double %v159, %v162
  %v164 = load double, double* %v18, align 8
  %v165 = load double, double* %v13, align 8
  %v166 = fmul double %v164, %v165
  %v167 = fsub double %v163, %v166
  store double %v167, double* %v27, align 8
  %v168 = load double, double* %v14, align 8
  %v169 = load double, double* %v17, align 8
  %v170 = fmul double %v168, %v169
  %v171 = load double, double* %v15, align 8
  %v172 = load double, double* %v16, align 8
  %v173 = fmul double %v171, %v172
  %v174 = fadd double %v170, %v173
  %v175 = load double, double* %v19, align 8
  %v176 = load double, double* %v13, align 8
  %v177 = fmul double %v175, %v176
  %v178 = fsub double %v174, %v177
  store double %v178, double* %v28, align 8
  %v179 = load double, double* %v16, align 8
  %v180 = load double, double* %v16, align 8
  %v181 = fmul double %v179, %v180
  %v182 = load double, double* %v17, align 8
  %v183 = load double, double* %v17, align 8
  %v184 = fmul double %v182, %v183
  %v185 = fsub double %v181, %v184
  %v186 = load double, double* %v18, align 8
  %v187 = load double, double* %v14, align 8
  %v188 = fmul double %v186, %v187
  %v189 = fsub double %v185, %v188
  %v190 = load double, double* %v19, align 8
  %v191 = load double, double* %v15, align 8
  %v192 = fmul double %v190, %v191
  %v193 = fadd double %v189, %v192
  store double %v193, double* %v29, align 8
  %v194 = load double, double* %v16, align 8
  %v195 = load double, double* %v17, align 8
  %v196 = fmul double %v194, %v195
  %v197 = fmul double %v196, 2.000000e+00
  %v198 = load double, double* %v18, align 8
  %v199 = load double, double* %v15, align 8
  %v200 = fmul double %v198, %v199
  %v201 = fsub double %v197, %v200
  %v202 = load double, double* %v19, align 8
  %v203 = load double, double* %v14, align 8
  %v204 = fmul double %v202, %v203
  %v205 = fsub double %v201, %v204
  store double %v205, double* %v30, align 8
  %v206 = load double, double* %v20, align 8
  %v207 = load double, double* %v20, align 8
  %v208 = fmul double %v206, %v207
  %v209 = load double, double* %v21, align 8
  %v210 = load double, double* %v21, align 8
  %v211 = fmul double %v209, %v210
  %v212 = fsub double %v208, %v211
  %v213 = load double, double* %v22, align 8
  %v214 = load double, double* %v22, align 8
  %v215 = fmul double %v213, %v214
  %v216 = fsub double %v212, %v215
  %v217 = load double, double* %v23, align 8
  %v218 = load double, double* %v25, align 8
  %v219 = fmul double %v217, %v218
  %v220 = fmul double %v219, 2.000000e+00
  %v221 = fadd double %v216, %v220
  %v222 = load double, double* %v24, align 8
  %v223 = load double, double* %v26, align 8
  %v224 = fmul double %v222, %v223
  %v225 = fmul double %v224, 2.000000e+00
  %v226 = fadd double %v221, %v225
  %v227 = load double, double* %v27, align 8
  %v228 = load double, double* %v27, align 8
  %v229 = fmul double %v227, %v228
  %v230 = fsub double %v226, %v229
  %v231 = load double, double* %v28, align 8
  %v232 = load double, double* %v28, align 8
  %v233 = fmul double %v231, %v232
  %v234 = fsub double %v230, %v233
  %v235 = load double, double* %v29, align 8
  %v236 = load double, double* %v29, align 8
  %v237 = fmul double %v235, %v236
  %v238 = fadd double %v234, %v237
  %v239 = load double, double* %v30, align 8
  %v240 = load double, double* %v30, align 8
  %v241 = fmul double %v239, %v240
  %v242 = fadd double %v238, %v241
  store double %v242, double* %v31, align 8
  %v243 = load double, double* %v31, align 8
  %v244 = call double @f1(double %v243) #1
  %v245 = load double, double* %v32, align 8
  %v246 = fcmp olt double %v244, %v245
  br i1 %v246, label %b1, label %b2

b1:                                               ; preds = %b0
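  ; Reached when f1(%v243) is less than 1.0 (the value still held in %v32):
  ; store 3 into the leading i8 of the %s.3 record reached through %a0,
  ; presumably a status code.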
  %v247 = load %s.0*, %s.0** %v0, align 4
  %v248 = getelementptr inbounds %s.0, %s.0* %v247, i32 0, i32 2
  %v249 = load %s.3*, %s.3** %v248, align 4
  %v250 = getelementptr inbounds %s.3, %s.3* %v249, i32 0, i32 0
  store i8 3, i8* %v250, align 1
  br label %b3

b2:                                               ; preds = %b0
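  ; Reached otherwise: replace %v32 with 1.0 / %v31 and use it to scale the
  ; final products, truncating each result to float (%v3-%v12).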
  %v251 = load double, double* %v32, align 8
  %v252 = load double, double* %v31, align 8
  %v253 = fdiv double %v251, %v252
  store double %v253, double* %v32, align 8
  %v254 = load double, double* %v13, align 8
  %v255 = load double, double* %v20, align 8
  %v256 = fmul double %v254, %v255
  %v257 = load double, double* %v14, align 8
  %v258 = load double, double* %v21, align 8
  %v259 = fmul double %v257, %v258
  %v260 = fsub double %v256, %v259
  %v261 = load double, double* %v15, align 8
  %v262 = load double, double* %v22, align 8
  %v263 = fmul double %v261, %v262
  %v264 = fsub double %v260, %v263
  %v265 = load double, double* %v16, align 8
  %v266 = load double, double* %v25, align 8
  %v267 = fmul double %v265, %v266
  %v268 = fadd double %v264, %v267
  %v269 = load double, double* %v17, align 8
  %v270 = load double, double* %v26, align 8
  %v271 = fmul double %v269, %v270
  %v272 = fadd double %v268, %v271
  store double %v272, double* %v33, align 8
  %v273 = load double, double* %v33, align 8
  %v274 = load double, double* %v32, align 8
  %v275 = fmul double %v273, %v274
  %v276 = fptrunc double %v275 to float
  store float %v276, float* %v3, align 4
  %v277 = load double, double* %v14, align 8
  %v278 = fsub double -0.000000e+00, %v277
  %v279 = load double, double* %v20, align 8
  %v280 = fmul double %v278, %v279
  %v281 = load double, double* %v16, align 8
  %v282 = load double, double* %v21, align 8
  %v283 = fmul double %v281, %v282
  %v284 = fadd double %v280, %v283
  %v285 = load double, double* %v17, align 8
  %v286 = load double, double* %v22, align 8
  %v287 = fmul double %v285, %v286
  %v288 = fadd double %v284, %v287
  %v289 = load double, double* %v18, align 8
  %v290 = load double, double* %v25, align 8
  %v291 = fmul double %v289, %v290
  %v292 = fsub double %v288, %v291
  %v293 = load double, double* %v19, align 8
  %v294 = load double, double* %v26, align 8
  %v295 = fmul double %v293, %v294
  %v296 = fsub double %v292, %v295
  store double %v296, double* %v33, align 8
  %v297 = load double, double* %v33, align 8
  %v298 = load double, double* %v32, align 8
  %v299 = fmul double %v297, %v298
  %v300 = fptrunc double %v299 to float
  store float %v300, float* %v4, align 4
  %v301 = load double, double* %v15, align 8
  %v302 = fsub double -0.000000e+00, %v301
  %v303 = load double, double* %v20, align 8
  %v304 = fmul double %v302, %v303
  %v305 = load double, double* %v16, align 8
  %v306 = load double, double* %v22, align 8
  %v307 = fmul double %v305, %v306
  %v308 = fsub double %v304, %v307
  %v309 = load double, double* %v17, align 8
  %v310 = load double, double* %v21, align 8
  %v311 = fmul double %v309, %v310
  %v312 = fadd double %v308, %v311
  %v313 = load double, double* %v18, align 8
  %v314 = load double, double* %v26, align 8
  %v315 = fmul double %v313, %v314
  %v316 = fadd double %v312, %v315
  %v317 = load double, double* %v19, align 8
  %v318 = load double, double* %v25, align 8
  %v319 = fmul double %v317, %v318
  %v320 = fsub double %v316, %v319
  store double %v320, double* %v33, align 8
  %v321 = load double, double* %v33, align 8
  %v322 = load double, double* %v32, align 8
  %v323 = fmul double %v321, %v322
  %v324 = fptrunc double %v323 to float
  store float %v324, float* %v5, align 4
  %v325 = load double, double* %v16, align 8
  %v326 = load double, double* %v29, align 8
  %v327 = fmul double %v325, %v326
  %v328 = load double, double* %v17, align 8
  %v329 = load double, double* %v30, align 8
  %v330 = fmul double %v328, %v329
  %v331 = fadd double %v327, %v330
  %v332 = load double, double* %v14, align 8
  %v333 = load double, double* %v27, align 8
  %v334 = fmul double %v332, %v333
  %v335 = fsub double %v331, %v334
  %v336 = load double, double* %v15, align 8
  %v337 = load double, double* %v28, align 8
  %v338 = fmul double %v336, %v337
  %v339 = fsub double %v335, %v338
  %v340 = load double, double* %v13, align 8
  %v341 = load double, double* %v25, align 8
  %v342 = fmul double %v340, %v341
  %v343 = fadd double %v339, %v342
  store double %v343, double* %v33, align 8
  %v344 = load double, double* %v33, align 8
  %v345 = load double, double* %v32, align 8
  %v346 = fmul double %v344, %v345
  %v347 = fptrunc double %v346 to float
  store float %v347, float* %v6, align 4
  %v348 = load double, double* %v16, align 8
  %v349 = load double, double* %v30, align 8
  %v350 = fmul double %v348, %v349
  %v351 = load double, double* %v17, align 8
  %v352 = load double, double* %v29, align 8
  %v353 = fmul double %v351, %v352
  %v354 = fsub double %v350, %v353
  %v355 = load double, double* %v14, align 8
  %v356 = load double, double* %v28, align 8
  %v357 = fmul double %v355, %v356
  %v358 = fsub double %v354, %v357
  %v359 = load double, double* %v15, align 8
  %v360 = load double, double* %v27, align 8
  %v361 = fmul double %v359, %v360
  %v362 = fadd double %v358, %v361
  %v363 = load double, double* %v13, align 8
  %v364 = load double, double* %v26, align 8
  %v365 = fmul double %v363, %v364
  %v366 = fadd double %v362, %v365
  store double %v366, double* %v33, align 8
  %v367 = load double, double* %v33, align 8
  %v368 = load double, double* %v32, align 8
  %v369 = fmul double %v367, %v368
  %v370 = fptrunc double %v369 to float
  store float %v370, float* %v7, align 4
  %v371 = load double, double* %v14, align 8
  %v372 = fsub double -0.000000e+00, %v371
  %v373 = load double, double* %v29, align 8
  %v374 = fmul double %v372, %v373
  %v375 = load double, double* %v15, align 8
  %v376 = load double, double* %v30, align 8
  %v377 = fmul double %v375, %v376
  %v378 = fsub double %v374, %v377
  %v379 = load double, double* %v13, align 8
  %v380 = load double, double* %v27, align 8
  %v381 = fmul double %v379, %v380
  %v382 = fadd double %v378, %v381
  %v383 = load double, double* %v14, align 8
  %v384 = load double, double* %v25, align 8
  %v385 = fmul double %v383, %v384
  %v386 = fsub double %v382, %v385
  %v387 = load double, double* %v15, align 8
  %v388 = load double, double* %v26, align 8
  %v389 = fmul double %v387, %v388
  %v390 = fadd double %v386, %v389
  store double %v390, double* %v33, align 8
  %v391 = load double, double* %v33, align 8
  %v392 = load double, double* %v32, align 8
  %v393 = fmul double %v391, %v392
  %v394 = fptrunc double %v393 to float
  store float %v394, float* %v8, align 4
  %v395 = load double, double* %v14, align 8
  %v396 = fsub double -0.000000e+00, %v395
  %v397 = load double, double* %v30, align 8
  %v398 = fmul double %v396, %v397
  %v399 = load double, double* %v15, align 8
  %v400 = load double, double* %v29, align 8
  %v401 = fmul double %v399, %v400
  %v402 = fadd double %v398, %v401
  %v403 = load double, double* %v13, align 8
  %v404 = load double, double* %v28, align 8
  %v405 = fmul double %v403, %v404
  %v406 = fadd double %v402, %v405
  %v407 = load double, double* %v14, align 8
  %v408 = load double, double* %v26, align 8
  %v409 = fmul double %v407, %v408
  %v410 = fsub double %v406, %v409
  %v411 = load double, double* %v15, align 8
  %v412 = load double, double* %v25, align 8
  %v413 = fmul double %v411, %v412
  %v414 = fsub double %v410, %v413
  store double %v414, double* %v33, align 8
  %v415 = load double, double* %v33, align 8
  %v416 = load double, double* %v32, align 8
  %v417 = fmul double %v415, %v416
  %v418 = fptrunc double %v417 to float
  store float %v418, float* %v9, align 4
  %v419 = load double, double* %v13, align 8
  %v420 = load double, double* %v20, align 8
  %v421 = fmul double %v419, %v420
  %v422 = load double, double* %v16, align 8
  %v423 = load double, double* %v23, align 8
  %v424 = fmul double %v422, %v423
  %v425 = fsub double %v421, %v424
  %v426 = load double, double* %v17, align 8
  %v427 = load double, double* %v24, align 8
  %v428 = fmul double %v426, %v427
  %v429 = fsub double %v425, %v428
  %v430 = load double, double* %v18, align 8
  %v431 = load double, double* %v27, align 8
  %v432 = fmul double %v430, %v431
  %v433 = fadd double %v429, %v432
  %v434 = load double, double* %v19, align 8
  %v435 = load double, double* %v28, align 8
  %v436 = fmul double %v434, %v435
  %v437 = fadd double %v433, %v436
  store double %v437, double* %v33, align 8
  %v438 = load double, double* %v33, align 8
  %v439 = load double, double* %v32, align 8
  %v440 = fmul double %v438, %v439
  %v441 = fptrunc double %v440 to float
  store float %v441, float* %v10, align 4
  %v442 = load double, double* %v18, align 8
  %v443 = fsub double -0.000000e+00, %v442
  %v444 = load double, double* %v29, align 8
  %v445 = fmul double %v443, %v444
  %v446 = load double, double* %v19, align 8
  %v447 = load double, double* %v30, align 8
  %v448 = fmul double %v446, %v447
  %v449 = fsub double %v445, %v448
  %v450 = load double, double* %v14, align 8
  %v451 = load double, double* %v23, align 8
  %v452 = fmul double %v450, %v451
  %v453 = fadd double %v449, %v452
  %v454 = load double, double* %v15, align 8
  %v455 = load double, double* %v24, align 8
  %v456 = fmul double %v454, %v455
  %v457 = fadd double %v453, %v456
  %v458 = load double, double* %v13, align 8
  %v459 = load double, double* %v21, align 8
  %v460 = fmul double %v458, %v459
  %v461 = fsub double %v457, %v460
  store double %v461, double* %v33, align 8
  %v462 = load double, double* %v33, align 8
  %v463 = load double, double* %v32, align 8
  %v464 = fmul double %v462, %v463
  %v465 = fptrunc double %v464 to float
  store float %v465, float* %v11, align 4
  %v466 = load double, double* %v18, align 8
  %v467 = fsub double -0.000000e+00, %v466
  %v468 = load double, double* %v30, align 8
  %v469 = fmul double %v467, %v468
  %v470 = load double, double* %v19, align 8
  %v471 = load double, double* %v29, align 8
  %v472 = fmul double %v470, %v471
  %v473 = fadd double %v469, %v472
  %v474 = load double, double* %v14, align 8
  %v475 = load double, double* %v24, align 8
  %v476 = fmul double %v474, %v475
  %v477 = fadd double %v473, %v476
  %v478 = load double, double* %v15, align 8
  %v479 = load double, double* %v23, align 8
  %v480 = fmul double %v478, %v479
  %v481 = fsub double %v477, %v480
  %v482 = load double, double* %v13, align 8
  %v483 = load double, double* %v22, align 8
  %v484 = fmul double %v482, %v483
  %v485 = fsub double %v481, %v484
  store double %v485, double* %v33, align 8
  %v486 = load double, double* %v33, align 8
  %v487 = load double, double* %v32, align 8
  %v488 = fmul double %v486, %v487
  %v489 = fptrunc double %v488 to float
  store float %v489, float* %v12, align 4
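  ; Scatter %v3-%v12 into the sixteen %s.12 (re, im) output slots of %a2,
  ; negating selected imaginary components and zeroing the imaginary parts
  ; of slots 0, 5, 10, and 15.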
  %v490 = load float, float* %v3, align 4
  %v491 = load %s.12*, %s.12** %v2, align 4
  %v492 = getelementptr inbounds %s.12, %s.12* %v491, i32 0
  %v493 = getelementptr inbounds %s.12, %s.12* %v492, i32 0, i32 0
  store float %v490, float* %v493, align 4
  %v494 = load %s.12*, %s.12** %v2, align 4
  %v495 = getelementptr inbounds %s.12, %s.12* %v494, i32 0
  %v496 = getelementptr inbounds %s.12, %s.12* %v495, i32 0, i32 1
  store float 0.000000e+00, float* %v496, align 4
  %v497 = load float, float* %v4, align 4
  %v498 = load %s.12*, %s.12** %v2, align 4
  %v499 = getelementptr inbounds %s.12, %s.12* %v498, i32 1
  %v500 = getelementptr inbounds %s.12, %s.12* %v499, i32 0, i32 0
  store float %v497, float* %v500, align 4
  %v501 = load float, float* %v5, align 4
  %v502 = load %s.12*, %s.12** %v2, align 4
  %v503 = getelementptr inbounds %s.12, %s.12* %v502, i32 1
  %v504 = getelementptr inbounds %s.12, %s.12* %v503, i32 0, i32 1
  store float %v501, float* %v504, align 4
  %v505 = load float, float* %v6, align 4
  %v506 = load %s.12*, %s.12** %v2, align 4
  %v507 = getelementptr inbounds %s.12, %s.12* %v506, i32 2
  %v508 = getelementptr inbounds %s.12, %s.12* %v507, i32 0, i32 0
  store float %v505, float* %v508, align 4
  %v509 = load float, float* %v7, align 4
  %v510 = load %s.12*, %s.12** %v2, align 4
  %v511 = getelementptr inbounds %s.12, %s.12* %v510, i32 2
  %v512 = getelementptr inbounds %s.12, %s.12* %v511, i32 0, i32 1
  store float %v509, float* %v512, align 4
  %v513 = load float, float* %v8, align 4
  %v514 = load %s.12*, %s.12** %v2, align 4
  %v515 = getelementptr inbounds %s.12, %s.12* %v514, i32 3
  %v516 = getelementptr inbounds %s.12, %s.12* %v515, i32 0, i32 0
  store float %v513, float* %v516, align 4
  %v517 = load float, float* %v9, align 4
  %v518 = load %s.12*, %s.12** %v2, align 4
  %v519 = getelementptr inbounds %s.12, %s.12* %v518, i32 3
  %v520 = getelementptr inbounds %s.12, %s.12* %v519, i32 0, i32 1
  store float %v517, float* %v520, align 4
  %v521 = load float, float* %v4, align 4
  %v522 = load %s.12*, %s.12** %v2, align 4
  %v523 = getelementptr inbounds %s.12, %s.12* %v522, i32 4
  %v524 = getelementptr inbounds %s.12, %s.12* %v523, i32 0, i32 0
  store float %v521, float* %v524, align 4
  %v525 = load float, float* %v5, align 4
  %v526 = fsub float -0.000000e+00, %v525
  %v527 = load %s.12*, %s.12** %v2, align 4
  %v528 = getelementptr inbounds %s.12, %s.12* %v527, i32 4
  %v529 = getelementptr inbounds %s.12, %s.12* %v528, i32 0, i32 1
  store float %v526, float* %v529, align 4
  %v530 = load float, float* %v10, align 4
  %v531 = load %s.12*, %s.12** %v2, align 4
  %v532 = getelementptr inbounds %s.12, %s.12* %v531, i32 5
  %v533 = getelementptr inbounds %s.12, %s.12* %v532, i32 0, i32 0
  store float %v530, float* %v533, align 4
  %v534 = load %s.12*, %s.12** %v2, align 4
  %v535 = getelementptr inbounds %s.12, %s.12* %v534, i32 5
  %v536 = getelementptr inbounds %s.12, %s.12* %v535, i32 0, i32 1
  store float 0.000000e+00, float* %v536, align 4
  %v537 = load float, float* %v11, align 4
  %v538 = load %s.12*, %s.12** %v2, align 4
  %v539 = getelementptr inbounds %s.12, %s.12* %v538, i32 6
  %v540 = getelementptr inbounds %s.12, %s.12* %v539, i32 0, i32 0
  store float %v537, float* %v540, align 4
  %v541 = load float, float* %v12, align 4
  %v542 = load %s.12*, %s.12** %v2, align 4
  %v543 = getelementptr inbounds %s.12, %s.12* %v542, i32 6
  %v544 = getelementptr inbounds %s.12, %s.12* %v543, i32 0, i32 1
  store float %v541, float* %v544, align 4
  %v545 = load float, float* %v6, align 4
  %v546 = load %s.12*, %s.12** %v2, align 4
  %v547 = getelementptr inbounds %s.12, %s.12* %v546, i32 7
  %v548 = getelementptr inbounds %s.12, %s.12* %v547, i32 0, i32 0
  store float %v545, float* %v548, align 4
  %v549 = load float, float* %v7, align 4
  %v550 = load %s.12*, %s.12** %v2, align 4
  %v551 = getelementptr inbounds %s.12, %s.12* %v550, i32 7
  %v552 = getelementptr inbounds %s.12, %s.12* %v551, i32 0, i32 1
  store float %v549, float* %v552, align 4
  %v553 = load float, float* %v6, align 4
  %v554 = load %s.12*, %s.12** %v2, align 4
  %v555 = getelementptr inbounds %s.12, %s.12* %v554, i32 8
  %v556 = getelementptr inbounds %s.12, %s.12* %v555, i32 0, i32 0
  store float %v553, float* %v556, align 4
  %v557 = load float, float* %v7, align 4
  %v558 = fsub float -0.000000e+00, %v557
  %v559 = load %s.12*, %s.12** %v2, align 4
  %v560 = getelementptr inbounds %s.12, %s.12* %v559, i32 8
  %v561 = getelementptr inbounds %s.12, %s.12* %v560, i32 0, i32 1
  store float %v558, float* %v561, align 4
  %v562 = load float, float* %v11, align 4
  %v563 = load %s.12*, %s.12** %v2, align 4
  %v564 = getelementptr inbounds %s.12, %s.12* %v563, i32 9
  %v565 = getelementptr inbounds %s.12, %s.12* %v564, i32 0, i32 0
  store float %v562, float* %v565, align 4
  %v566 = load float, float* %v12, align 4
  %v567 = fsub float -0.000000e+00, %v566
  %v568 = load %s.12*, %s.12** %v2, align 4
  %v569 = getelementptr inbounds %s.12, %s.12* %v568, i32 9
  %v570 = getelementptr inbounds %s.12, %s.12* %v569, i32 0, i32 1
  store float %v567, float* %v570, align 4
  %v571 = load float, float* %v10, align 4
  %v572 = load %s.12*, %s.12** %v2, align 4
  %v573 = getelementptr inbounds %s.12, %s.12* %v572, i32 10
  %v574 = getelementptr inbounds %s.12, %s.12* %v573, i32 0, i32 0
  store float %v571, float* %v574, align 4
  %v575 = load %s.12*, %s.12** %v2, align 4
  %v576 = getelementptr inbounds %s.12, %s.12* %v575, i32 10
  %v577 = getelementptr inbounds %s.12, %s.12* %v576, i32 0, i32 1
  store float 0.000000e+00, float* %v577, align 4
  %v578 = load float, float* %v4, align 4
  %v579 = load %s.12*, %s.12** %v2, align 4
  %v580 = getelementptr inbounds %s.12, %s.12* %v579, i32 11
  %v581 = getelementptr inbounds %s.12, %s.12* %v580, i32 0, i32 0
  store float %v578, float* %v581, align 4
  %v582 = load float, float* %v5, align 4
  %v583 = load %s.12*, %s.12** %v2, align 4
  %v584 = getelementptr inbounds %s.12, %s.12* %v583, i32 11
  %v585 = getelementptr inbounds %s.12, %s.12* %v584, i32 0, i32 1
  store float %v582, float* %v585, align 4
  %v586 = load float, float* %v8, align 4
  %v587 = load %s.12*, %s.12** %v2, align 4
  %v588 = getelementptr inbounds %s.12, %s.12* %v587, i32 12
  %v589 = getelementptr inbounds %s.12, %s.12* %v588, i32 0, i32 0
  store float %v586, float* %v589, align 4
  %v590 = load float, float* %v9, align 4
  %v591 = fsub float -0.000000e+00, %v590
  %v592 = load %s.12*, %s.12** %v2, align 4
  %v593 = getelementptr inbounds %s.12, %s.12* %v592, i32 12
  %v594 = getelementptr inbounds %s.12, %s.12* %v593, i32 0, i32 1
  store float %v591, float* %v594, align 4
  %v595 = load float, float* %v6, align 4
  %v596 = load %s.12*, %s.12** %v2, align 4
  %v597 = getelementptr inbounds %s.12, %s.12* %v596, i32 13
  %v598 = getelementptr inbounds %s.12, %s.12* %v597, i32 0, i32 0
  store float %v595, float* %v598, align 4
  %v599 = load float, float* %v7, align 4
  %v600 = fsub float -0.000000e+00, %v599
  %v601 = load %s.12*, %s.12** %v2, align 4
  %v602 = getelementptr inbounds %s.12, %s.12* %v601, i32 13
  %v603 = getelementptr inbounds %s.12, %s.12* %v602, i32 0, i32 1
  store float %v600, float* %v603, align 4
  %v604 = load float, float* %v4, align 4
  %v605 = load %s.12*, %s.12** %v2, align 4
  %v606 = getelementptr inbounds %s.12, %s.12* %v605, i32 14
  %v607 = getelementptr inbounds %s.12, %s.12* %v606, i32 0, i32 0
  store float %v604, float* %v607, align 4
  %v608 = load float, float* %v5, align 4
  %v609 = fsub float -0.000000e+00, %v608
  %v610 = load %s.12*, %s.12** %v2, align 4
  %v611 = getelementptr inbounds %s.12, %s.12* %v610, i32 14
  %v612 = getelementptr inbounds %s.12, %s.12* %v611, i32 0, i32 1
  store float %v609, float* %v612, align 4
  %v613 = load float, float* %v3, align 4
  %v614 = load %s.12*, %s.12** %v2, align 4
  %v615 = getelementptr inbounds %s.12, %s.12* %v614, i32 15
  %v616 = getelementptr inbounds %s.12, %s.12* %v615, i32 0, i32 0
  store float %v613, float* %v616, align 4
  %v617 = load %s.12*, %s.12** %v2, align 4
  %v618 = getelementptr inbounds %s.12, %s.12* %v617, i32 15
  %v619 = getelementptr inbounds %s.12, %s.12* %v618, i32 0, i32 1
  store float 0.000000e+00, float* %v619, align 4
  br label %b3

b3:                                               ; preds = %b2, %b1
  ret void
}

; Function Attrs: nounwind readnone
declare double @f1(double) #1

attributes #0 = { nounwind "target-cpu"="hexagonv55" }
attributes #1 = { nounwind readnone }