; RUN: llc -march=hexagon -O3 < %s
; REQUIRES: asserts

; Test that we don't assert because of requiring too many scavenger spill
; slots. This happens because the kill flag wasn't added to the appropriate
; operands for the spill code.

; Reduced reproducer: the single loop body below creates very high HVX
; register pressure (a long dependence chain of predicated byte-adds whose
; <128 x i1> predicate operands are all live at once), which forces the
; register scavenger to spill. Stripped line-number prefixes that had been
; fused into every line of the original paste; the IR itself is unchanged.
define void @f0(i32 %a0, i8* noalias nocapture %a1) #0 {
b0:
  %v0 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> undef)
  %v1 = sdiv i32 %a0, 128
  %v2 = icmp sgt i32 %a0, 127
  br i1 %v2, label %b1, label %b3

b1:                                               ; preds = %b0
  %v3 = bitcast i8* %a1 to <32 x i32>*
  br label %b2

; Loop body: interleaved vlalignbi/valignbi shifts, vabsdiffub differences,
; vgtub compares (each producing a live <128 x i1> predicate), and a long
; chain of vaddbnq predicated accumulations feeding a final store.
b2:                                               ; preds = %b2, %b1
  %v4 = phi <32 x i32>* [ %v3, %b1 ], [ undef, %b2 ]
  %v5 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> zeroinitializer, i32 2)
  %v6 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v5, <32 x i32> zeroinitializer)
  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer)
  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v7, <32 x i32> zeroinitializer)
  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> zeroinitializer, <32 x i32> %v8, <32 x i32> zeroinitializer)
  %v10 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v9, <32 x i32> zeroinitializer)
  %v11 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> zeroinitializer, i32 4)
  %v12 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v11, <32 x i32> zeroinitializer)
  %v13 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> zeroinitializer, <32 x i32> zeroinitializer)
  %v14 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> zeroinitializer, <32 x i32> undef)
  %v15 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v12, <32 x i32> undef)
  %v16 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v13, <32 x i32> undef)
  %v17 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v10, <32 x i32> zeroinitializer)
  %v18 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v14, <32 x i32> %v17, <32 x i32> zeroinitializer)
  %v19 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v15, <32 x i32> %v18, <32 x i32> zeroinitializer)
  %v20 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v16, <32 x i32> %v19, <32 x i32> zeroinitializer)
  %v21 = getelementptr inbounds i8, i8* null, i32 undef
  %v22 = bitcast i8* %v21 to <32 x i32>*
  %v23 = load <32 x i32>, <32 x i32>* %v22, align 128, !tbaa !0
  %v24 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v23, <32 x i32> zeroinitializer)
  %v25 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v24, <32 x i32> undef)
  %v26 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v25, <32 x i32> %v20, <32 x i32> zeroinitializer)
  %v27 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v26, <32 x i32> zeroinitializer)
  %v28 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v27, <32 x i32> zeroinitializer)
  %v29 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v28, <32 x i32> zeroinitializer)
  %v30 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v29, <32 x i32> zeroinitializer)
  %v31 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v30, <32 x i32> zeroinitializer)
  %v32 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v31, <32 x i32> zeroinitializer)
  %v33 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v32, <32 x i32> zeroinitializer)
  %v34 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v33, <32 x i32> zeroinitializer)
  %v35 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v34, <32 x i32> zeroinitializer)
  %v36 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 1)
  %v37 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 1)
  %v38 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 2)
  %v39 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v36, <32 x i32> zeroinitializer)
  %v40 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v37, <32 x i32> zeroinitializer)
  %v41 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v38, <32 x i32> zeroinitializer)
  %v42 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v39, <32 x i32> undef)
  %v43 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v40, <32 x i32> undef)
  %v44 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v41, <32 x i32> undef)
  %v45 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> undef)
  %v46 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v42, <32 x i32> %v35, <32 x i32> zeroinitializer)
  %v47 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v43, <32 x i32> %v46, <32 x i32> zeroinitializer)
  %v48 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v44, <32 x i32> %v47, <32 x i32> zeroinitializer)
  %v49 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v45, <32 x i32> %v48, <32 x i32> zeroinitializer)
  %v50 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 4)
  %v51 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 4)
  %v52 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v50, <32 x i32> zeroinitializer)
  %v54 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v51, <32 x i32> zeroinitializer)
  %v55 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v52, <32 x i32> undef)
  %v56 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v53, <32 x i32> undef)
  %v57 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v54, <32 x i32> undef)
  %v58 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v49, <32 x i32> zeroinitializer)
  %v59 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v55, <32 x i32> %v58, <32 x i32> zeroinitializer)
  %v60 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v56, <32 x i32> %v59, <32 x i32> zeroinitializer)
  %v61 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v57, <32 x i32> %v60, <32 x i32> zeroinitializer)
  ; Final reduction: table lookup-accumulate, widening multiplies, shifts,
  ; and a shuffle whose result is stored through the loop-carried pointer.
  %v62 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> zeroinitializer, <32 x i32> %v61, <32 x i32> undef, i32 5)
  %v63 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32> undef, <32 x i32> undef)
  %v64 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v62)
  %v65 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32> zeroinitializer, <32 x i32> %v64)
  %v66 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v63)
  %v67 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v63)
  %v68 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v66, <32 x i32> %v67, i32 14)
  %v69 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v65)
  %v70 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v69, <32 x i32> undef, i32 14)
  %v71 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %v70, <32 x i32> %v68)
  store <32 x i32> %v71, <32 x i32>* %v4, align 128, !tbaa !0
  %v72 = icmp slt i32 0, %v1
  br i1 %v72, label %b2, label %b3

b3:                                               ; preds = %b2, %b0
  ret void
}

; Intrinsic declarations, function attributes, and TBAA metadata used by @f0.
; Stripped line-number prefixes that had been fused into every line of the
; original paste; the declarations themselves are unchanged.

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32>, <32 x i32>) #1

; Function Attrs: nounwind readnone
declare <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32>, <32 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1>, <32 x i32>, <32 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32>, <32 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32>, <32 x i32>, <32 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32>, <32 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32>, <32 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32>, <32 x i32>) #1

attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
attributes #1 = { nounwind readnone }

!0 = !{!1, !1, i64 0}
!1 = !{!"omnipotent char", !2, i64 0}
!2 = !{!"Simple C/C++ TBAA"}
