1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled %s -o - | FileCheck %s
3
; Single i8 add-reduction loop. The vector.reduce.add is computed inside the
; loop (only its last-iteration value is used), and the lane-mask select feeds
; the accumulator phi, so the loop can be fully tail-predicated: note the
; dlstp.8/letp pair in the CHECK lines with no explicit vctp/vpsel tail merge.
define dso_local arm_aapcs_vfpcc zeroext i8 @one_loop_add_add_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: one_loop_add_add_v16i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    cbz r2, .LBB0_4
; CHECK-NEXT:  @ %bb.1: @ %vector.ph
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    dlstp.8 lr, r2
; CHECK-NEXT:  .LBB0_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.u8 q1, [r1], #16
; CHECK-NEXT:    vldrb.u8 q2, [r0], #16
; CHECK-NEXT:    vadd.i8 q0, q2, q1
; CHECK-NEXT:    vaddv.u8 r12, q0
; CHECK-NEXT:    letp lr, .LBB0_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    uxtb.w r0, r12
; CHECK-NEXT:    pop {r7, pc}
; CHECK-NEXT:  .LBB0_4:
; CHECK-NEXT:    mov.w r12, #0
; CHECK-NEXT:    uxtb.w r0, r12
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp11 = icmp eq i32 %N, 0
  br i1 %cmp11, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (16 x i8).
  %n.rnd.up = add i32 %N, 15
  %n.vec = and i32 %n.rnd.up, -16
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <16 x i8> [ zeroinitializer, %vector.ph ], [ %i5, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <16 x i8>*
  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %i1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
  %i2 = getelementptr inbounds i8, i8* %b, i32 %index
  %i3 = bitcast i8* %i2 to <16 x i8>*
  %wide.masked.load16 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %i3, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
  %i4 = add <16 x i8> %wide.masked.load, %wide.masked.load16
  ; Inactive lanes keep the previous accumulator value.
  %i5 = select <16 x i1> %active.lane.mask, <16 x i8> %i4, <16 x i8> %vec.phi
  ; Horizontal reduction done every iteration; only the final value escapes.
  %i6 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %i5)
  %index.next = add i32 %index, 16
  %i7 = icmp eq i32 %index.next, %n.vec
  br i1 %i7, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i8 [ 0, %entry ], [ %i6, %middle.block ]
  ret i8 %res.0.lcssa
}
59
; i16 add-reduction with two separate adds into the accumulator phi and the
; lane-mask select deferred to middle.block. Here the loop is NOT converted
; to dlstp/letp: the CHECK lines show an explicit dls/le loop with a vctp.16
; per iteration and a vpsel + vaddv tail merge after the loop.
define dso_local arm_aapcs_vfpcc signext i16 @one_loop_add_add_v8i16(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: one_loop_add_add_v8i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    ittt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    sxtheq r0, r0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB1_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #7
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    bic r3, r3, #7
; CHECK-NEXT:    sub.w r12, r3, #8
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #3
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB1_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.16 r2
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrbt.u16 q1, [r0], #8
; CHECK-NEXT:    subs r2, #8
; CHECK-NEXT:    vadd.i16 q1, q0, q1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrbt.u16 q2, [r1], #8
; CHECK-NEXT:    vadd.i16 q1, q1, q2
; CHECK-NEXT:    le lr, .LBB1_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vaddv.u16 r0, q0
; CHECK-NEXT:    pop.w {r7, lr}
; CHECK-NEXT:    sxth r0, r0
; CHECK-NEXT:    bx lr
entry:
  %cmp12 = icmp eq i32 %N, 0
  br i1 %cmp12, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (8 x i16).
  %n.rnd.up = add i32 %N, 7
  %n.vec = and i32 %n.rnd.up, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %i7, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <8 x i8>*
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i1, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i2 = zext <8 x i8> %wide.masked.load to <8 x i16>
  %i3 = getelementptr inbounds i8, i8* %b, i32 %index
  %i4 = bitcast i8* %i3 to <8 x i8>*
  %wide.masked.load17 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i4, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i5 = zext <8 x i8> %wide.masked.load17 to <8 x i16>
  ; Both operands are added into the accumulator in sequence (add-add chain).
  %i6 = add <8 x i16> %vec.phi, %i2
  %i7 = add <8 x i16> %i6, %i5
  %index.next = add i32 %index, 8
  %i8 = icmp eq i32 %index.next, %n.vec
  br i1 %i8, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  ; Discard lanes that were inactive in the final iteration, then reduce.
  %i9 = select <8 x i1> %active.lane.mask, <8 x i16> %i7, <8 x i16> %vec.phi
  %i10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i9)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i16 [ 0, %entry ], [ %i10, %middle.block ]
  ret i16 %res.0.lcssa
}
131
; i8 sub-then-add reduction (acc += a[i] - b[i]) with the lane-mask select in
; middle.block. Codegen keeps an explicit dls/le loop with per-iteration
; vctp.8 predication and a vpsel + vaddv merge after the loop.
define dso_local arm_aapcs_vfpcc zeroext i8 @one_loop_sub_add_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: one_loop_sub_add_v16i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    ittt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    uxtbeq r0, r0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB2_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    add.w r3, r2, #15
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    bic r3, r3, #15
; CHECK-NEXT:    sub.w r12, r3, #16
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #4
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB2_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.8 r2
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrbt.u8 q1, [r1], #16
; CHECK-NEXT:    vldrbt.u8 q2, [r0], #16
; CHECK-NEXT:    subs r2, #16
; CHECK-NEXT:    vsub.i8 q1, q2, q1
; CHECK-NEXT:    vadd.i8 q1, q1, q0
; CHECK-NEXT:    le lr, .LBB2_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vaddv.u8 r0, q0
; CHECK-NEXT:    pop.w {r7, lr}
; CHECK-NEXT:    uxtb r0, r0
; CHECK-NEXT:    bx lr
entry:
  %cmp11 = icmp eq i32 %N, 0
  br i1 %cmp11, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (16 x i8).
  %n.rnd.up = add i32 %N, 15
  %n.vec = and i32 %n.rnd.up, -16
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <16 x i8> [ zeroinitializer, %vector.ph ], [ %i5, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <16 x i8>*
  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %i1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
  %i2 = getelementptr inbounds i8, i8* %b, i32 %index
  %i3 = bitcast i8* %i2 to <16 x i8>*
  %wide.masked.load16 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %i3, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
  ; acc += a[i] - b[i]
  %i4 = sub <16 x i8> %wide.masked.load, %wide.masked.load16
  %i5 = add <16 x i8> %i4, %vec.phi
  %index.next = add i32 %index, 16
  %i6 = icmp eq i32 %index.next, %n.vec
  br i1 %i6, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  ; Discard lanes that were inactive in the final iteration, then reduce.
  %i7 = select <16 x i1> %active.lane.mask, <16 x i8> %i5, <16 x i8> %vec.phi
  %i8 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %i7)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i8 [ 0, %entry ], [ %i8, %middle.block ]
  ret i8 %res.0.lcssa
}
200
; i16 variant of the sub-then-add reduction (acc += b[i] - a[i], widened from
; i8 loads). Same codegen shape as the v16i8 version: dls/le loop with
; per-iteration vctp.16 and a vpsel + vaddv tail merge in middle.block.
define dso_local arm_aapcs_vfpcc signext i16 @one_loop_sub_add_v8i16(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: one_loop_sub_add_v8i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    ittt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    sxtheq r0, r0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB3_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #7
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    bic r3, r3, #7
; CHECK-NEXT:    sub.w r12, r3, #8
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #3
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB3_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.16 r2
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrbt.u16 q1, [r0], #8
; CHECK-NEXT:    vldrbt.u16 q2, [r1], #8
; CHECK-NEXT:    subs r2, #8
; CHECK-NEXT:    vsub.i16 q1, q2, q1
; CHECK-NEXT:    vadd.i16 q1, q1, q0
; CHECK-NEXT:    le lr, .LBB3_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vaddv.u16 r0, q0
; CHECK-NEXT:    pop.w {r7, lr}
; CHECK-NEXT:    sxth r0, r0
; CHECK-NEXT:    bx lr
entry:
  %cmp12 = icmp eq i32 %N, 0
  br i1 %cmp12, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (8 x i16).
  %n.rnd.up = add i32 %N, 7
  %n.vec = and i32 %n.rnd.up, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %i7, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <8 x i8>*
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i1, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i2 = zext <8 x i8> %wide.masked.load to <8 x i16>
  %i3 = getelementptr inbounds i8, i8* %b, i32 %index
  %i4 = bitcast i8* %i3 to <8 x i8>*
  %wide.masked.load17 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i4, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i5 = zext <8 x i8> %wide.masked.load17 to <8 x i16>
  ; acc += b[i] - a[i] (operands widened u8 -> u16 before the subtract)
  %i6 = sub <8 x i16> %i5, %i2
  %i7 = add <8 x i16> %i6, %vec.phi
  %index.next = add i32 %index, 8
  %i8 = icmp eq i32 %index.next, %n.vec
  br i1 %i8, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  ; Discard lanes that were inactive in the final iteration, then reduce.
  %i9 = select <8 x i1> %active.lane.mask, <8 x i16> %i7, <8 x i16> %vec.phi
  %i10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i9)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i16 [ 0, %entry ], [ %i10, %middle.block ]
  ret i16 %res.0.lcssa
}
271
; i8 multiply-accumulate reduction (acc += a[i] * b[i]) with the lane-mask
; select in middle.block. Codegen: dls/le loop, per-iteration vctp.8, then
; vpsel + vaddv tail merge.
define dso_local arm_aapcs_vfpcc zeroext i8 @one_loop_mul_add_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: one_loop_mul_add_v16i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    ittt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    uxtbeq r0, r0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB4_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    add.w r3, r2, #15
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    bic r3, r3, #15
; CHECK-NEXT:    sub.w r12, r3, #16
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #4
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB4_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.8 r2
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrbt.u8 q1, [r0], #16
; CHECK-NEXT:    vldrbt.u8 q2, [r1], #16
; CHECK-NEXT:    subs r2, #16
; CHECK-NEXT:    vmul.i8 q1, q2, q1
; CHECK-NEXT:    vadd.i8 q1, q1, q0
; CHECK-NEXT:    le lr, .LBB4_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vaddv.u8 r0, q0
; CHECK-NEXT:    pop.w {r7, lr}
; CHECK-NEXT:    uxtb r0, r0
; CHECK-NEXT:    bx lr
entry:
  %cmp10 = icmp eq i32 %N, 0
  br i1 %cmp10, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (16 x i8).
  %n.rnd.up = add i32 %N, 15
  %n.vec = and i32 %n.rnd.up, -16
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <16 x i8> [ zeroinitializer, %vector.ph ], [ %i5, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <16 x i8>*
  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %i1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
  %i2 = getelementptr inbounds i8, i8* %b, i32 %index
  %i3 = bitcast i8* %i2 to <16 x i8>*
  %wide.masked.load15 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %i3, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
  ; acc += b[i] * a[i]
  %i4 = mul <16 x i8> %wide.masked.load15, %wide.masked.load
  %i5 = add <16 x i8> %i4, %vec.phi
  %index.next = add i32 %index, 16
  %i6 = icmp eq i32 %index.next, %n.vec
  br i1 %i6, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  ; Discard lanes that were inactive in the final iteration, then reduce.
  %i7 = select <16 x i1> %active.lane.mask, <16 x i8> %i5, <16 x i8> %vec.phi
  %i8 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %i7)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i8 [ 0, %entry ], [ %i8, %middle.block ]
  ret i8 %res.0.lcssa
}
340
; i16 multiply-accumulate reduction (operands widened from u8 loads).
; Same codegen shape as the v16i8 mul_add test: dls/le loop with vctp.16
; per iteration and vpsel + vaddv in middle.block.
define dso_local arm_aapcs_vfpcc signext i16 @one_loop_mul_add_v8i16(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: one_loop_mul_add_v8i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    ittt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    sxtheq r0, r0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB5_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #7
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    bic r3, r3, #7
; CHECK-NEXT:    sub.w r12, r3, #8
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #3
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB5_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.16 r2
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrbt.u16 q1, [r0], #8
; CHECK-NEXT:    vldrbt.u16 q2, [r1], #8
; CHECK-NEXT:    subs r2, #8
; CHECK-NEXT:    vmul.i16 q1, q2, q1
; CHECK-NEXT:    vadd.i16 q1, q1, q0
; CHECK-NEXT:    le lr, .LBB5_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vaddv.u16 r0, q0
; CHECK-NEXT:    pop.w {r7, lr}
; CHECK-NEXT:    sxth r0, r0
; CHECK-NEXT:    bx lr
entry:
  %cmp12 = icmp eq i32 %N, 0
  br i1 %cmp12, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (8 x i16).
  %n.rnd.up = add i32 %N, 7
  %n.vec = and i32 %n.rnd.up, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %i7, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <8 x i8>*
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i1, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i2 = zext <8 x i8> %wide.masked.load to <8 x i16>
  %i3 = getelementptr inbounds i8, i8* %b, i32 %index
  %i4 = bitcast i8* %i3 to <8 x i8>*
  %wide.masked.load17 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i4, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i5 = zext <8 x i8> %wide.masked.load17 to <8 x i16>
  ; acc += b[i] * a[i] (operands widened u8 -> u16 before the multiply)
  %i6 = mul <8 x i16> %i5, %i2
  %i7 = add <8 x i16> %i6, %vec.phi
  %index.next = add i32 %index, 8
  %i8 = icmp eq i32 %index.next, %n.vec
  br i1 %i8, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  ; Discard lanes that were inactive in the final iteration, then reduce.
  %i9 = select <8 x i1> %active.lane.mask, <8 x i16> %i7, <8 x i16> %vec.phi
  %i10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i9)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i16 [ 0, %entry ], [ %i10, %middle.block ]
  ret i16 %res.0.lcssa
}
411
; Two back-to-back predicated mul-add reduction loops over the same arrays:
; the second loop's accumulator is seeded with the first loop's scalar result
; in lane 0 (insertelement in vector.ph47 / vmov.32 q0[0] in the CHECK lines).
; Both loops are emitted as dls/le with per-iteration vctp.32 and a
; vpsel + vaddv merge in their respective middle blocks.
define dso_local arm_aapcs_vfpcc i32 @two_loops_mul_add_v4i32(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: two_loops_mul_add_v4i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    beq .LBB6_8
; CHECK-NEXT:  @ %bb.1: @ %vector.ph
; CHECK-NEXT:    adds r3, r2, #3
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    bic r3, r3, #3
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    subs r7, r3, #4
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    mov r5, r1
; CHECK-NEXT:    add.w r6, r3, r7, lsr #2
; CHECK-NEXT:    mov r3, r2
; CHECK-NEXT:    dls lr, r6
; CHECK-NEXT:  .LBB6_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r3
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrbt.u32 q1, [r4], #4
; CHECK-NEXT:    vldrbt.u32 q2, [r5], #4
; CHECK-NEXT:    subs r3, #4
; CHECK-NEXT:    vmul.i32 q1, q2, q1
; CHECK-NEXT:    vadd.i32 q1, q1, q0
; CHECK-NEXT:    le lr, .LBB6_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vaddv.u32 r12, q0
; CHECK-NEXT:    cbz r2, .LBB6_7
; CHECK-NEXT:  @ %bb.4: @ %vector.ph47
; CHECK-NEXT:    movs r3, #0
; CHECK-NEXT:    vdup.32 q0, r3
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r7, lsr #2
; CHECK-NEXT:    vmov.32 q0[0], r12
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB6_5: @ %vector.body46
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrbt.u32 q0, [r0], #4
; CHECK-NEXT:    vldrbt.u32 q2, [r1], #4
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vmul.i32 q0, q2, q0
; CHECK-NEXT:    vadd.i32 q0, q0, q1
; CHECK-NEXT:    le lr, .LBB6_5
; CHECK-NEXT:  @ %bb.6: @ %middle.block44
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vaddv.u32 r12, q0
; CHECK-NEXT:  .LBB6_7: @ %for.cond.cleanup7
; CHECK-NEXT:    mov r0, r12
; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
; CHECK-NEXT:  .LBB6_8:
; CHECK-NEXT:    movs r0, #0
; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
entry:
  %cmp35 = icmp eq i32 %N, 0
  br i1 %cmp35, label %for.cond.cleanup7, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (4 x i32).
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %i7, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <4 x i8>*
  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %i1, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
  %i2 = zext <4 x i8> %wide.masked.load to <4 x i32>
  %i3 = getelementptr inbounds i8, i8* %b, i32 %index
  %i4 = bitcast i8* %i3 to <4 x i8>*
  %wide.masked.load43 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %i4, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
  %i5 = zext <4 x i8> %wide.masked.load43 to <4 x i32>
  %i6 = mul nuw nsw <4 x i32> %i5, %i2
  %i7 = add <4 x i32> %i6, %vec.phi
  %index.next = add i32 %index, 4
  %i8 = icmp eq i32 %index.next, %n.vec
  br i1 %i8, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %i9 = select <4 x i1> %active.lane.mask, <4 x i32> %i7, <4 x i32> %vec.phi
  %i10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %i9)
  ; %cmp35 is false on this path (N != 0), so this always enters the
  ; second loop; the branch shape is kept to mirror the original CFG.
  br i1 %cmp35, label %for.cond.cleanup7, label %vector.ph47

vector.ph47:                                      ; preds = %middle.block
  %n.rnd.up48 = add i32 %N, 3
  %n.vec50 = and i32 %n.rnd.up48, -4
  ; Seed lane 0 of the second accumulator with the first loop's result.
  %i11 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %i10, i32 0
  br label %vector.body46

vector.body46:                                    ; preds = %vector.body46, %vector.ph47
  %index51 = phi i32 [ 0, %vector.ph47 ], [ %index.next52, %vector.body46 ]
  %vec.phi60 = phi <4 x i32> [ %i11, %vector.ph47 ], [ %i19, %vector.body46 ]
  %i12 = getelementptr inbounds i8, i8* %a, i32 %index51
  %active.lane.mask61 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index51, i32 %N)
  %i13 = bitcast i8* %i12 to <4 x i8>*
  %wide.masked.load62 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %i13, i32 1, <4 x i1> %active.lane.mask61, <4 x i8> undef)
  %i14 = zext <4 x i8> %wide.masked.load62 to <4 x i32>
  %i15 = getelementptr inbounds i8, i8* %b, i32 %index51
  %i16 = bitcast i8* %i15 to <4 x i8>*
  %wide.masked.load63 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %i16, i32 1, <4 x i1> %active.lane.mask61, <4 x i8> undef)
  %i17 = zext <4 x i8> %wide.masked.load63 to <4 x i32>
  %i18 = mul nuw nsw <4 x i32> %i17, %i14
  %i19 = add <4 x i32> %i18, %vec.phi60
  %index.next52 = add i32 %index51, 4
  %i20 = icmp eq i32 %index.next52, %n.vec50
  br i1 %i20, label %middle.block44, label %vector.body46

middle.block44:                                   ; preds = %vector.body46
  %i21 = select <4 x i1> %active.lane.mask61, <4 x i32> %i19, <4 x i32> %vec.phi60
  %i22 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %i21)
  br label %for.cond.cleanup7

for.cond.cleanup7:                                ; preds = %middle.block44, %middle.block, %entry
  %res.1.lcssa = phi i32 [ %i10, %middle.block ], [ 0, %entry ], [ %i22, %middle.block44 ]
  ret i32 %res.1.lcssa
}
536
; One loop feeding TWO independent reductions (mul-add and sub-add) over the
; same masked loads. Both accumulators share the single vctp.16 predicate;
; middle.block performs two vpsel + vaddv merges, and the truncated results
; are stored back through %a and %b.
define dso_local arm_aapcs_vfpcc void @two_reductions_mul_add_v8i16(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr {
; CHECK-LABEL: two_reductions_mul_add_v8i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    cbz r2, .LBB7_4
; CHECK-NEXT:  @ %bb.1: @ %vector.ph
; CHECK-NEXT:    adds r3, r2, #7
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    bic r3, r3, #7
; CHECK-NEXT:    movs r4, #1
; CHECK-NEXT:    subs r3, #8
; CHECK-NEXT:    vmov q3, q1
; CHECK-NEXT:    add.w r12, r4, r3, lsr #3
; CHECK-NEXT:    mov r3, r0
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    dls lr, r12
; CHECK-NEXT:  .LBB7_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.16 r2
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrbt.u16 q1, [r3], #8
; CHECK-NEXT:    vldrbt.u16 q4, [r4], #8
; CHECK-NEXT:    vmov q2, q3
; CHECK-NEXT:    vsub.i16 q3, q4, q1
; CHECK-NEXT:    vmul.i16 q1, q4, q1
; CHECK-NEXT:    subs r2, #8
; CHECK-NEXT:    vadd.i16 q3, q3, q2
; CHECK-NEXT:    vadd.i16 q1, q1, q0
; CHECK-NEXT:    le lr, .LBB7_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q2, q3, q2
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vaddv.u16 r4, q2
; CHECK-NEXT:    vaddv.u16 r2, q0
; CHECK-NEXT:    b .LBB7_5
; CHECK-NEXT:  .LBB7_4:
; CHECK-NEXT:    movs r2, #0
; CHECK-NEXT:    movs r4, #0
; CHECK-NEXT:  .LBB7_5: @ %for.cond.cleanup
; CHECK-NEXT:    strb r2, [r0]
; CHECK-NEXT:    strb r4, [r1]
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, pc}
entry:
  %cmp12 = icmp eq i32 %N, 0
  br i1 %cmp12, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  ; Round the trip count up to a multiple of the vector width (8 x i16).
  %n.rnd.up = add i32 %N, 7
  %n.vec = and i32 %n.rnd.up, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  ; Two accumulators: %vec.phi for the mul-add, %vec.phi.1 for the sub-add.
  %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %i8, %vector.body ]
  %vec.phi.1 = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %i9, %vector.body ]
  %i = getelementptr inbounds i8, i8* %a, i32 %index
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %i1 = bitcast i8* %i to <8 x i8>*
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i1, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i2 = zext <8 x i8> %wide.masked.load to <8 x i16>
  %i3 = getelementptr inbounds i8, i8* %b, i32 %index
  %i4 = bitcast i8* %i3 to <8 x i8>*
  %wide.masked.load17 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %i4, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %i5 = zext <8 x i8> %wide.masked.load17 to <8 x i16>
  %i6 = mul <8 x i16> %i5, %i2
  %i7 = sub <8 x i16> %i5, %i2
  %i8 = add <8 x i16> %i6, %vec.phi
  %i9 = add <8 x i16> %i7, %vec.phi.1
  %index.next = add i32 %index, 8
  %i10 = icmp eq i32 %index.next, %n.vec
  br i1 %i10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  ; Merge and reduce each accumulator independently.
  %i11 = select <8 x i1> %active.lane.mask, <8 x i16> %i8, <8 x i16> %vec.phi
  %i12 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i11)
  %i13 = select <8 x i1> %active.lane.mask, <8 x i16> %i9, <8 x i16> %vec.phi.1
  %i14 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i13)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i16 [ 0, %entry ], [ %i12, %middle.block ]
  %res.1.lcssa = phi i16 [ 0, %entry ], [ %i14, %middle.block ]
  ; Results are truncated to i8 and stored through the input pointers.
  %trunc.res.0 = trunc i16 %res.0.lcssa to i8
  store i8 %trunc.res.0, i8* %a
  %trunc.res.1 = trunc i16 %res.1.lcssa to i8
  store i8 %trunc.res.1, i8* %b
  ret void
}
628
; Reduction over a row of the @days leap-year lookup table, where the row
; index (%3) comes from the leap-year computation in entry/lor.rhs and the
; accumulator is seeded with the loaded day value in lane 0 (insertelement /
; vmov.32 q0[0] in the CHECK lines). The loop is emitted as dls/le with a
; per-iteration vctp.32 and a vpsel + vaddv merge in middle.block.
%struct.date = type { i32, i32, i32, i32 }
@days = internal unnamed_addr constant [2 x [13 x i32]] [[13 x i32] [i32 0, i32 31, i32 28, i32 31, i32 30, i32 31, i32 30, i32 31, i32 31, i32 30, i32 31, i32 30, i32 31], [13 x i32] [i32 0, i32 31, i32 29, i32 31, i32 30, i32 31, i32 30, i32 31, i32 31, i32 30, i32 31, i32 30, i32 31]], align 4
define i32 @wrongop(%struct.date* nocapture readonly %pd) {
; CHECK-LABEL: wrongop:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    mov r1, r0
; CHECK-NEXT:    movw r12, #47184
; CHECK-NEXT:    movw r3, #23593
; CHECK-NEXT:    ldrd r2, lr, [r1, #4]
; CHECK-NEXT:    movt r12, #1310
; CHECK-NEXT:    movt r3, #49807
; CHECK-NEXT:    mla r3, lr, r3, r12
; CHECK-NEXT:    movw r1, #55051
; CHECK-NEXT:    movw r4, #23593
; CHECK-NEXT:    movt r1, #163
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    movt r4, #655
; CHECK-NEXT:    ror.w r12, r3, #4
; CHECK-NEXT:    cmp r12, r1
; CHECK-NEXT:    cset r1, lo
; CHECK-NEXT:    ror.w r3, r3, #2
; CHECK-NEXT:    mov.w r12, #1
; CHECK-NEXT:    cmp r3, r4
; CHECK-NEXT:    csel r3, r1, r12, lo
; CHECK-NEXT:    lsls.w r4, lr, #30
; CHECK-NEXT:    csel r1, r1, r3, ne
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    it lt
; CHECK-NEXT:    poplt {r4, pc}
; CHECK-NEXT:  .LBB8_1: @ %vector.ph
; CHECK-NEXT:    movw r3, :lower16:days
; CHECK-NEXT:    movs r4, #52
; CHECK-NEXT:    movt r3, :upper16:days
; CHECK-NEXT:    smlabb r1, r1, r4, r3
; CHECK-NEXT:    movs r3, #0
; CHECK-NEXT:    vdup.32 q0, r3
; CHECK-NEXT:    vmov.32 q0[0], r0
; CHECK-NEXT:    adds r0, r2, #3
; CHECK-NEXT:    bic r0, r0, #3
; CHECK-NEXT:    subs r0, #4
; CHECK-NEXT:    add.w r0, r12, r0, lsr #2
; CHECK-NEXT:    dls lr, r0
; CHECK-NEXT:  .LBB8_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrwt.u32 q0, [r1], #16
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vadd.i32 q0, q0, q1
; CHECK-NEXT:    le lr, .LBB8_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    pop {r4, pc}
entry:
  %day1 = getelementptr inbounds %struct.date, %struct.date* %pd, i32 0, i32 0
  %0 = load i32, i32* %day1, align 4
  %year = getelementptr inbounds %struct.date, %struct.date* %pd, i32 0, i32 2
  %1 = load i32, i32* %year, align 4
  ; Leap-year test: (year % 4 == 0 && year % 100 != 0) || year % 400 == 0.
  %2 = and i32 %1, 3
  %cmp = icmp ne i32 %2, 0
  %rem3 = srem i32 %1, 100
  %cmp4.not = icmp eq i32 %rem3, 0
  %or.cond = or i1 %cmp, %cmp4.not
  br i1 %or.cond, label %lor.rhs, label %lor.end

lor.rhs:                                          ; preds = %entry
  %rem6 = srem i32 %1, 400
  %cmp7 = icmp eq i32 %rem6, 0
  %phi.cast = zext i1 %cmp7 to i32
  br label %lor.end

lor.end:                                          ; preds = %entry, %lor.rhs
  ; %3 selects the row of @days: 1 for leap years, 0 otherwise.
  %3 = phi i32 [ %phi.cast, %lor.rhs ], [ 1, %entry ]
  %month = getelementptr inbounds %struct.date, %struct.date* %pd, i32 0, i32 1
  %4 = load i32, i32* %month, align 4
  %cmp820 = icmp sgt i32 %4, 0
  br i1 %cmp820, label %vector.ph, label %for.end

vector.ph:                                        ; preds = %lor.end
  ; Round the trip count (month) up to a multiple of the vector width.
  %n.rnd.up = add i32 %4, 3
  %n.vec = and i32 %n.rnd.up, -4
  ; Seed lane 0 of the accumulator with the day value loaded above.
  %5 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %0, i32 0
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ %5, %vector.ph ], [ %8, %vector.body ]
  %6 = getelementptr inbounds [2 x [13 x i32]], [2 x [13 x i32]]* @days, i32 0, i32 %3, i32 %index
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %4)
  %7 = bitcast i32* %6 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* nonnull %7, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %8 = add <4 x i32> %wide.masked.load, %vec.phi
  %index.next = add i32 %index, 4
  %9 = icmp eq i32 %index.next, %n.vec
  br i1 %9, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %10 = select <4 x i1> %active.lane.mask, <4 x i32> %8, <4 x i32> %vec.phi
  %11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %10)
  br label %for.end

for.end:                                          ; preds = %middle.block, %lor.end
  %day.0.lcssa = phi i32 [ %0, %lor.end ], [ %11, %middle.block ]
  ret i32 %day.0.lcssa
}
737
; Intrinsic declarations used by the tests above: masked loads,
; active-lane-mask generation, and horizontal add reductions.
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
748