; RUN: llc -march=hexagon -O3 < %s
; REQUIRES: asserts
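; There are no CHECK lines; the test only requires llc to compile this
; successfully without crashing.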

target triple = "hexagon-unknown--elf"

; Function Attrs: nounwind
define void @f0(i32 %a0, i32 %a1) #0 {
b0:
  %v0 = alloca [8 x i32], align 8
  %v1 = bitcast [8 x i32]* %v0 to i8*
  call void @llvm.memset.p0i8.i32(i8* align 8 %v1, i8 0, i32 32, i1 false)
  %v2 = icmp sgt i32 %a0, 0
  br i1 %v2, label %b1, label %b18

b1:                                               ; preds = %b0
  %v3 = getelementptr inbounds [8 x i32], [8 x i32]* %v0, i32 0, i32 6
  %v4 = inttoptr i32 %a1 to i32*
  %v5 = add i32 %a0, -1
  %v6 = icmp sgt i32 %v5, 0
  br i1 %v6, label %b2, label %b13

b2:                                               ; preds = %b1
  %v7 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 0
  %v8 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 1
  %v9 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 2
  %v10 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 3
  %v11 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 4
  %v12 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 5
  %v13 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 6
  %v14 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 7
  %v15 = add i32 %a0, -2
  %v16 = lshr i32 %v15, 1
  %v17 = add i32 %v16, 1
  %v18 = urem i32 %v17, 2
  %v19 = icmp ne i32 %v18, 0
  %v20 = add i32 %v5, -2
  %v21 = icmp ugt i32 %v17, 1
  br i1 %v21, label %b3, label %b7

b3:                                               ; preds = %b2
  br label %b4

b4:                                               ; preds = %b22, %b3
  %v22 = phi i32 [ 0, %b3 ], [ %v124, %b22 ]
  %v23 = phi i32 [ 0, %b3 ], [ %v136, %b22 ]
  %v24 = mul nsw i32 %v22, 4
  %v25 = add nsw i32 %v24, 268435456
  %v26 = inttoptr i32 %v25 to i32*
  store volatile i32 %a1, i32* %v26, align 4, !tbaa !0
  %v27 = load i32, i32* %v7, align 8, !tbaa !0
  store volatile i32 %v27, i32* %v4, align 4, !tbaa !0
  %v28 = load i32, i32* %v8, align 4, !tbaa !0
  store volatile i32 %v28, i32* %v4, align 4, !tbaa !0
  %v29 = load i32, i32* %v9, align 8, !tbaa !0
  store volatile i32 %v29, i32* %v4, align 4, !tbaa !0
  %v30 = load i32, i32* %v10, align 4, !tbaa !0
  store volatile i32 %v30, i32* %v4, align 4, !tbaa !0
  %v31 = load i32, i32* %v11, align 8, !tbaa !0
  store volatile i32 %v31, i32* %v4, align 4, !tbaa !0
  %v32 = load i32, i32* %v12, align 4, !tbaa !0
  store volatile i32 %v32, i32* %v4, align 4, !tbaa !0
  %v33 = load i32, i32* %v13, align 8, !tbaa !0
  store volatile i32 %v33, i32* %v4, align 4, !tbaa !0
  %v34 = load i32, i32* %v14, align 4, !tbaa !0
  store volatile i32 %v34, i32* %v4, align 4, !tbaa !0
  %v35 = icmp eq i32 %v23, 0
  br i1 %v35, label %b19, label %b20

b5:                                               ; preds = %b22
  %v36 = phi i32 [ %v136, %b22 ]
  %v37 = phi i32 [ %v124, %b22 ]
  br i1 %v19, label %b6, label %b12

b6:                                               ; preds = %b5
  br label %b7

b7:                                               ; preds = %b6, %b2
  %v38 = phi i32 [ 0, %b2 ], [ %v36, %b6 ]
  %v39 = phi i32 [ 0, %b2 ], [ %v37, %b6 ]
  br label %b8

b8:                                               ; preds = %b10, %b7
  %v40 = phi i32 [ %v39, %b7 ], [ %v54, %b10 ]
  %v41 = phi i32 [ %v38, %b7 ], [ %v66, %b10 ]
  %v42 = mul nsw i32 %v40, 4
  %v43 = add nsw i32 %v42, 268435456
  %v44 = inttoptr i32 %v43 to i32*
  store volatile i32 %a1, i32* %v44, align 4, !tbaa !0
  %v45 = load i32, i32* %v7, align 8, !tbaa !0
  store volatile i32 %v45, i32* %v4, align 4, !tbaa !0
  %v46 = load i32, i32* %v8, align 4, !tbaa !0
  store volatile i32 %v46, i32* %v4, align 4, !tbaa !0
  %v47 = load i32, i32* %v9, align 8, !tbaa !0
  store volatile i32 %v47, i32* %v4, align 4, !tbaa !0
  %v48 = load i32, i32* %v10, align 4, !tbaa !0
  store volatile i32 %v48, i32* %v4, align 4, !tbaa !0
  %v49 = load i32, i32* %v11, align 8, !tbaa !0
  store volatile i32 %v49, i32* %v4, align 4, !tbaa !0
  %v50 = load i32, i32* %v12, align 4, !tbaa !0
  store volatile i32 %v50, i32* %v4, align 4, !tbaa !0
  %v51 = load i32, i32* %v13, align 8, !tbaa !0
  store volatile i32 %v51, i32* %v4, align 4, !tbaa !0
  %v52 = load i32, i32* %v14, align 4, !tbaa !0
  store volatile i32 %v52, i32* %v4, align 4, !tbaa !0
  %v53 = icmp eq i32 %v41, 0
  br i1 %v53, label %b9, label %b10

b9:                                               ; preds = %b8
  store i32 0, i32* %v3, align 8, !tbaa !0
  br label %b10

b10:                                              ; preds = %b9, %b8
  %v54 = phi i32 [ 3, %b9 ], [ %v40, %b8 ]
  %v55 = mul nsw i32 %v54, 4
  %v56 = add nsw i32 %v55, 268435456
  %v57 = inttoptr i32 %v56 to i32*
  store volatile i32 %a1, i32* %v57, align 4, !tbaa !0
  %v58 = load i32, i32* %v7, align 8, !tbaa !0
  store volatile i32 %v58, i32* %v4, align 4, !tbaa !0
  %v59 = load i32, i32* %v8, align 4, !tbaa !0
  store volatile i32 %v59, i32* %v4, align 4, !tbaa !0
  %v60 = load i32, i32* %v9, align 8, !tbaa !0
  store volatile i32 %v60, i32* %v4, align 4, !tbaa !0
  %v61 = load i32, i32* %v10, align 4, !tbaa !0
  store volatile i32 %v61, i32* %v4, align 4, !tbaa !0
  %v62 = load i32, i32* %v11, align 8, !tbaa !0
  store volatile i32 %v62, i32* %v4, align 4, !tbaa !0
  %v63 = load i32, i32* %v12, align 4, !tbaa !0
  store volatile i32 %v63, i32* %v4, align 4, !tbaa !0
  %v64 = load i32, i32* %v13, align 8, !tbaa !0
  store volatile i32 %v64, i32* %v4, align 4, !tbaa !0
  %v65 = load i32, i32* %v14, align 4, !tbaa !0
  store volatile i32 %v65, i32* %v4, align 4, !tbaa !0
  %v66 = add nsw i32 %v41, 2
  %v67 = icmp slt i32 %v66, %v5
  br i1 %v67, label %b8, label %b11

b11:                                              ; preds = %b10
  %v68 = phi i32 [ %v66, %b10 ]
  %v69 = phi i32 [ %v54, %b10 ]
  br label %b12

b12:                                              ; preds = %b11, %b5
  %v70 = phi i32 [ %v36, %b5 ], [ %v68, %b11 ]
  %v71 = phi i32 [ %v37, %b5 ], [ %v69, %b11 ]
  %v72 = icmp eq i32 %v70, %a0
  br i1 %v72, label %b18, label %b13

b13:                                              ; preds = %b12, %b1
  %v73 = phi i32 [ 0, %b1 ], [ %v70, %b12 ]
  %v74 = phi i32 [ 0, %b1 ], [ %v71, %b12 ]
  %v75 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 0
  %v76 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 1
  %v77 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 2
  %v78 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 3
  %v79 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 4
  %v80 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 5
  %v81 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 6
  %v82 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 7
  br label %b14

b14:                                              ; preds = %b16, %b13
  %v83 = phi i32 [ %v74, %b13 ], [ %v86, %b16 ]
  %v84 = phi i32 [ %v73, %b13 ], [ %v98, %b16 ]
  %v85 = icmp eq i32 %v84, 1
  br i1 %v85, label %b15, label %b16

b15:                                              ; preds = %b14
  store i32 0, i32* %v3, align 8, !tbaa !0
  br label %b16

b16:                                              ; preds = %b15, %b14
  %v86 = phi i32 [ 3, %b15 ], [ %v83, %b14 ]
  %v87 = mul nsw i32 %v86, 4
  %v88 = add nsw i32 %v87, 268435456
  %v89 = inttoptr i32 %v88 to i32*
  store volatile i32 %a1, i32* %v89, align 4, !tbaa !0
  %v90 = load i32, i32* %v75, align 8, !tbaa !0
  store volatile i32 %v90, i32* %v4, align 4, !tbaa !0
  %v91 = load i32, i32* %v76, align 4, !tbaa !0
  store volatile i32 %v91, i32* %v4, align 4, !tbaa !0
  %v92 = load i32, i32* %v77, align 8, !tbaa !0
  store volatile i32 %v92, i32* %v4, align 4, !tbaa !0
  %v93 = load i32, i32* %v78, align 4, !tbaa !0
  store volatile i32 %v93, i32* %v4, align 4, !tbaa !0
  %v94 = load i32, i32* %v79, align 8, !tbaa !0
  store volatile i32 %v94, i32* %v4, align 4, !tbaa !0
  %v95 = load i32, i32* %v80, align 4, !tbaa !0
  store volatile i32 %v95, i32* %v4, align 4, !tbaa !0
  %v96 = load i32, i32* %v81, align 8, !tbaa !0
  store volatile i32 %v96, i32* %v4, align 4, !tbaa !0
  %v97 = load i32, i32* %v82, align 4, !tbaa !0
  store volatile i32 %v97, i32* %v4, align 4, !tbaa !0
  %v98 = add nsw i32 %v84, 1
  %v99 = icmp eq i32 %v98, %a0
  br i1 %v99, label %b17, label %b14

b17:                                              ; preds = %b16
  br label %b18

b18:                                              ; preds = %b17, %b12, %b0
  ret void

b19:                                              ; preds = %b4
  store i32 0, i32* %v3, align 8, !tbaa !0
  br label %b20

b20:                                              ; preds = %b19, %b4
  %v100 = phi i32 [ 3, %b19 ], [ %v22, %b4 ]
  %v101 = mul nsw i32 %v100, 4
  %v102 = add nsw i32 %v101, 268435456
  %v103 = inttoptr i32 %v102 to i32*
  store volatile i32 %a1, i32* %v103, align 4, !tbaa !0
  %v104 = load i32, i32* %v7, align 8, !tbaa !0
  store volatile i32 %v104, i32* %v4, align 4, !tbaa !0
  %v105 = load i32, i32* %v8, align 4, !tbaa !0
  store volatile i32 %v105, i32* %v4, align 4, !tbaa !0
  %v106 = load i32, i32* %v9, align 8, !tbaa !0
  store volatile i32 %v106, i32* %v4, align 4, !tbaa !0
  %v107 = load i32, i32* %v10, align 4, !tbaa !0
  store volatile i32 %v107, i32* %v4, align 4, !tbaa !0
  %v108 = load i32, i32* %v11, align 8, !tbaa !0
  store volatile i32 %v108, i32* %v4, align 4, !tbaa !0
  %v109 = load i32, i32* %v12, align 4, !tbaa !0
  store volatile i32 %v109, i32* %v4, align 4, !tbaa !0
  %v110 = load i32, i32* %v13, align 8, !tbaa !0
  store volatile i32 %v110, i32* %v4, align 4, !tbaa !0
  %v111 = load i32, i32* %v14, align 4, !tbaa !0
  store volatile i32 %v111, i32* %v4, align 4, !tbaa !0
  %v112 = add nsw i32 %v23, 2
  %v113 = mul nsw i32 %v100, 4
  %v114 = add nsw i32 %v113, 268435456
  %v115 = inttoptr i32 %v114 to i32*
  store volatile i32 %a1, i32* %v115, align 4, !tbaa !0
  %v116 = load i32, i32* %v7, align 8, !tbaa !0
  store volatile i32 %v116, i32* %v4, align 4, !tbaa !0
  %v117 = load i32, i32* %v8, align 4, !tbaa !0
  store volatile i32 %v117, i32* %v4, align 4, !tbaa !0
  %v118 = load i32, i32* %v9, align 8, !tbaa !0
  store volatile i32 %v118, i32* %v4, align 4, !tbaa !0
  %v119 = load i32, i32* %v10, align 4, !tbaa !0
  store volatile i32 %v119, i32* %v4, align 4, !tbaa !0
  %v120 = load i32, i32* %v11, align 8, !tbaa !0
  store volatile i32 %v120, i32* %v4, align 4, !tbaa !0
  %v121 = load i32, i32* %v12, align 4, !tbaa !0
  store volatile i32 %v121, i32* %v4, align 4, !tbaa !0
  %v122 = load i32, i32* %v13, align 8, !tbaa !0
  store volatile i32 %v122, i32* %v4, align 4, !tbaa !0
  %v123 = load i32, i32* %v14, align 4, !tbaa !0
  store volatile i32 %v123, i32* %v4, align 4, !tbaa !0
  br i1 false, label %b21, label %b22

b21:                                              ; preds = %b20
  store i32 0, i32* %v3, align 8, !tbaa !0
  br label %b22

b22:                                              ; preds = %b21, %b20
  %v124 = phi i32 [ 3, %b21 ], [ %v100, %b20 ]
  %v125 = mul nsw i32 %v124, 4
  %v126 = add nsw i32 %v125, 268435456
  %v127 = inttoptr i32 %v126 to i32*
  store volatile i32 %a1, i32* %v127, align 4, !tbaa !0
  %v128 = load i32, i32* %v7, align 8, !tbaa !0
  store volatile i32 %v128, i32* %v4, align 4, !tbaa !0
  %v129 = load i32, i32* %v8, align 4, !tbaa !0
  store volatile i32 %v129, i32* %v4, align 4, !tbaa !0
  %v130 = load i32, i32* %v9, align 8, !tbaa !0
  store volatile i32 %v130, i32* %v4, align 4, !tbaa !0
  %v131 = load i32, i32* %v10, align 4, !tbaa !0
  store volatile i32 %v131, i32* %v4, align 4, !tbaa !0
  %v132 = load i32, i32* %v11, align 8, !tbaa !0
  store volatile i32 %v132, i32* %v4, align 4, !tbaa !0
  %v133 = load i32, i32* %v12, align 4, !tbaa !0
  store volatile i32 %v133, i32* %v4, align 4, !tbaa !0
  %v134 = load i32, i32* %v13, align 8, !tbaa !0
  store volatile i32 %v134, i32* %v4, align 4, !tbaa !0
  %v135 = load i32, i32* %v14, align 4, !tbaa !0
  store volatile i32 %v135, i32* %v4, align 4, !tbaa !0
  %v136 = add nsw i32 %v112, 2
  %v137 = icmp slt i32 %v136, %v20
  br i1 %v137, label %b4, label %b5
}

; Function Attrs: nounwind
define void @f1(i32 %a0, i32 %a1) #0 {
b0:
  tail call void @f0(i32 %a0, i32 %a1)
  ret void
}

; Function Attrs: argmemonly nounwind
declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1

attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }

!0 = !{!1, !1, i64 0}
!1 = !{!"long", !2, i64 0}
!2 = !{!"omnipotent char", !3, i64 0}
!3 = !{!"Simple C/C++ TBAA"}