# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -run-pass=arm-low-overhead-loops -tail-predication=enabled %s -o - | FileCheck %s

# TODO: As far as I can tell this test is fine. Tail-predicating the second loop means we remove the instruction that would otherwise block the first.
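#
# In the CHECK lines below, the do.body.i loop becomes a tail-predicated loop
# (MVE_DLSTP_32 / MVE_LETP), folding away its explicit MVE_VCTP32 and
# element-count decrement, while the do.body loop is only converted to a plain
# low-overhead loop (t2DLS / t2LEUpdate) and keeps its VCTP/VPST predication.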

--- |
  define arm_aapcs_vfpcc void @arm_var_f32_mve(float* %pSrc, i32 %blockSize, float* nocapture %pResult) #0 {
  entry:
    %0 = add i32 %blockSize, 3
    %1 = icmp slt i32 %blockSize, 4
    %smin = select i1 %1, i32 %blockSize, i32 4
    %2 = sub i32 %0, %smin
    %3 = lshr i32 %2, 2
    %4 = add nuw nsw i32 %3, 1
    %5 = icmp slt i32 %blockSize, 4
    %smin3 = select i1 %5, i32 %blockSize, i32 4
    %6 = sub i32 %0, %smin3
    %7 = lshr i32 %6, 2
    %8 = add nuw nsw i32 %7, 1
    %start1 = call i32 @llvm.start.loop.iterations.i32(i32 %8)
    br label %do.body.i

  do.body.i:                                        ; preds = %do.body.i, %entry
    %blkCnt.0.i = phi i32 [ %13, %do.body.i ], [ %blockSize, %entry ]
    %sumVec.0.i = phi <4 x float> [ %12, %do.body.i ], [ zeroinitializer, %entry ]
    %pSrc.addr.0.i = phi float* [ %add.ptr.i, %do.body.i ], [ %pSrc, %entry ]
    %9 = phi i32 [ %start1, %entry ], [ %14, %do.body.i ]
    %pSrc.addr.0.i2 = bitcast float* %pSrc.addr.0.i to <4 x float>*
    %10 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0.i)
    %11 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %pSrc.addr.0.i2, i32 4, <4 x i1> %10, <4 x float> zeroinitializer)
    %12 = tail call fast <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float> %sumVec.0.i, <4 x float> %11, <4 x i1> %10, <4 x float> %sumVec.0.i)
    %add.ptr.i = getelementptr inbounds float, float* %pSrc.addr.0.i, i32 4
    %13 = add i32 %blkCnt.0.i, -4
    %14 = call i32 @llvm.loop.decrement.reg.i32(i32 %9, i32 1)
    %15 = icmp ne i32 %14, 0
    br i1 %15, label %do.body.i, label %arm_mean_f32_mve.exit

  arm_mean_f32_mve.exit:                            ; preds = %do.body.i
    %16 = extractelement <4 x float> %12, i32 3
    %add2.i.i = fadd fast float %16, %16
    %conv.i = uitofp i32 %blockSize to float
    %div.i = fdiv fast float %add2.i.i, %conv.i
    %17 = bitcast float %div.i to i32
    %18 = insertelement <4 x i32> undef, i32 %17, i64 0
    %19 = shufflevector <4 x i32> %18, <4 x i32> undef, <4 x i32> zeroinitializer
    %20 = bitcast <4 x i32> %19 to <4 x float>
    %start2 = call i32 @llvm.start.loop.iterations.i32(i32 %4)
    br label %do.body

  do.body:                                          ; preds = %do.body, %arm_mean_f32_mve.exit
    %blkCnt.0 = phi i32 [ %blockSize, %arm_mean_f32_mve.exit ], [ %26, %do.body ]
    %sumVec.0 = phi <4 x float> [ zeroinitializer, %arm_mean_f32_mve.exit ], [ %25, %do.body ]
    %pSrc.addr.0 = phi float* [ %pSrc, %arm_mean_f32_mve.exit ], [ %add.ptr, %do.body ]
    %21 = phi i32 [ %start2, %arm_mean_f32_mve.exit ], [ %27, %do.body ]
    %pSrc.addr.01 = bitcast float* %pSrc.addr.0 to <4 x float>*
    %22 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0)
    %23 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %pSrc.addr.01, i32 4, <4 x i1> %22, <4 x float> zeroinitializer)
    %24 = tail call fast <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float> %23, <4 x float> %20, <4 x i1> %22, <4 x float> undef)
    %25 = tail call fast <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> %24, <4 x float> %24, <4 x float> %sumVec.0, <4 x i1> %22)
    %add.ptr = getelementptr inbounds float, float* %pSrc.addr.0, i32 4
    %26 = add i32 %blkCnt.0, -4
    %27 = call i32 @llvm.loop.decrement.reg.i32(i32 %21, i32 1)
    %28 = icmp ne i32 %27, 0
    br i1 %28, label %do.body, label %do.end

  do.end:                                           ; preds = %do.body
    %29 = extractelement <4 x float> %25, i32 3
    %add2.i = fadd fast float %29, %29
    %sub2 = add i32 %blockSize, -1
    %conv = uitofp i32 %sub2 to float
    %div = fdiv fast float %add2.i, %conv
    store float %div, float* %pResult, align 4
    ret void
  }

  ; Function Attrs: nounwind readnone
  declare <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #1

  ; Function Attrs: nounwind readnone
  declare <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x float>, <4 x i1>) #1

  ; Function Attrs: nounwind readnone
  declare <4 x i1> @llvm.arm.mve.vctp32(i32) #1

  ; Function Attrs: argmemonly nounwind readonly willreturn
  declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>) #2

  ; Function Attrs: nounwind readnone
  declare <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #1

  ; Function Attrs: noduplicate nounwind
  declare i32 @llvm.start.loop.iterations.i32(i32) #3

  ; Function Attrs: noduplicate nounwind
  declare i32 @llvm.loop.decrement.reg.i32(i32, i32) #3

  attributes #0 = { "target-features"="+mve.fp" }
  attributes #1 = { nounwind readnone "target-features"="+mve.fp" }
  attributes #2 = { argmemonly nounwind readonly willreturn "target-features"="+mve.fp" }
  attributes #3 = { noduplicate nounwind }

...
---
name:            arm_var_f32_mve
alignment:       2
exposesReturnsTwice: false
legalized:       false
regBankSelected: false
selected:        false
failedISel:      false
tracksRegLiveness: true
hasWinCFI:       false
registers:       []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap:     false
  hasPatchPoint:   false
  stackSize:       8
  offsetAdjustment: 0
  maxAlignment:    4
  adjustsStack:    false
  hasCalls:        false
  stackProtector:  ''
  maxCallFrameSize: 0
  cvBytesOfCalleeSavedRegisters: 0
  hasOpaqueSPAdjustment: false
  hasVAStart:      false
  hasMustTailInVarArgFunc: false
  localFrameSize:  0
  savePoint:       ''
  restorePoint:    ''
fixedStack:      []
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites:       []
constants:       []
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: arm_var_f32_mve
  ; CHECK: bb.0.entry:
  ; CHECK:   successors: %bb.1(0x80000000)
  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r4
  ; CHECK:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r4, -8
  ; CHECK:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
  ; CHECK:   $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
  ; CHECK:   $r12 = tMOVr $r0, 14 /* CC::al */, $noreg
  ; CHECK:   $lr = MVE_DLSTP_32 killed renamable $r3
  ; CHECK:   $r4 = tMOVr $lr, 14 /* CC::al */, $noreg
  ; CHECK: bb.1.do.body.i:
  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
  ; CHECK:   liveins: $lr, $q0, $r0, $r1, $r2, $r4, $r12
  ; CHECK:   renamable $r12, renamable $q1 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg :: (load (s128) from %ir.pSrc.addr.0.i2, align 4)
  ; CHECK:   renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, killed renamable $q0
  ; CHECK:   $lr = MVE_LETP killed renamable $lr, %bb.1
  ; CHECK: bb.2.arm_mean_f32_mve.exit:
  ; CHECK:   successors: %bb.3(0x80000000)
  ; CHECK:   liveins: $q0, $r0, $r1, $r2, $r4
  ; CHECK:   $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg
  ; CHECK:   dead $lr = tMOVr $r4, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0
  ; CHECK:   $lr = t2DLS killed $r4
  ; CHECK:   renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $r3 = VMOVRS killed renamable $s0, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
  ; CHECK:   renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, undef renamable $q1
  ; CHECK:   $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
  ; CHECK: bb.3.do.body:
  ; CHECK:   successors: %bb.3(0x7c000000), %bb.4(0x04000000)
  ; CHECK:   liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3
  ; CHECK:   renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
  ; CHECK:   renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
  ; CHECK:   MVE_VPST 2, implicit $vpr
  ; CHECK:   renamable $r0, renamable $q2 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load (s128) from %ir.pSrc.addr.01, align 4)
  ; CHECK:   renamable $q2 = nnan ninf nsz arcp contract afn reassoc MVE_VSUBf32 killed renamable $q2, renamable $q1, 1, renamable $vpr, undef renamable $q2
  ; CHECK:   renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 1, killed renamable $vpr
  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.3
  ; CHECK: bb.4.do.end:
  ; CHECK:   liveins: $q0, $r1, $r2
  ; CHECK:   renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0
  ; CHECK:   $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
  ; CHECK:   VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.pResult)
  ; CHECK:   frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
  bb.0.entry:
    successors: %bb.1(0x80000000)
    liveins: $r0, $r1, $r2, $r4, $lr

    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r4, -8
    $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
    tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
    t2IT 10, 8, implicit-def $itstate
    renamable $r3 = tMOVi8 $noreg, 4, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r3, implicit killed $itstate
    renamable $r12 = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
    renamable $r3, dead $cpsr = tSUBrr renamable $r1, killed renamable $r3, 14 /* CC::al */, $noreg
    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
    renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 3, 14 /* CC::al */, $noreg
    renamable $lr = nuw nsw t2ADDrs killed renamable $r12, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
    $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
    $r12 = tMOVr $r0, 14 /* CC::al */, $noreg
    $lr = t2DoLoopStart renamable $lr
    $r4 = tMOVr $lr, 14 /* CC::al */, $noreg

  bb.1.do.body.i:
    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
    liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r12

    renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    MVE_VPST 4, implicit $vpr
    renamable $r12, renamable $q1 = MVE_VLDRWU32_post killed renamable $r12, 16, 1, renamable $vpr :: (load (s128) from %ir.pSrc.addr.0.i2, align 4)
    renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VADDf32 killed renamable $q0, killed renamable $q1, 1, killed renamable $vpr, renamable $q0
    t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
    tB %bb.2, 14 /* CC::al */, $noreg

  bb.2.arm_mean_f32_mve.exit:
    successors: %bb.3(0x80000000)
    liveins: $q0, $r0, $r1, $r2, $r4

    $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg
    $lr = tMOVr $r4, 14 /* CC::al */, $noreg
    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0
    $lr = t2DoLoopStart killed $r4
    renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg
    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg
    renamable $r3 = VMOVRS killed renamable $s0, 14 /* CC::al */, $noreg
    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
    renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, undef renamable $q1
    $r3 = tMOVr $r1, 14 /* CC::al */, $noreg

  bb.3.do.body:
    successors: %bb.3(0x7c000000), %bb.4(0x04000000)
    liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3

    renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    MVE_VPST 2, implicit $vpr
    renamable $r0, renamable $q2 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load (s128) from %ir.pSrc.addr.01, align 4)
    renamable $q2 = nnan ninf nsz arcp contract afn reassoc MVE_VSUBf32 killed renamable $q2, renamable $q1, 1, renamable $vpr, undef renamable $q2
    renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VFMAf32 killed renamable $q0, killed renamable $q2, renamable $q2, 1, killed renamable $vpr
    t2LoopEnd renamable $lr, %bb.3, implicit-def dead $cpsr
    tB %bb.4, 14 /* CC::al */, $noreg

  bb.4.do.end:
    liveins: $q0, $r1, $r2

    renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg
    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0
    $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg
    renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
    VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.pResult)
    frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc

...