# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -o - -mtriple=riscv64 -mattr=+experimental-v \
# RUN:     -run-pass=riscv-insert-vsetvli | FileCheck %s

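# This file runs the RISCVInsertVSETVLI pass (-run-pass=riscv-insert-vsetvli)
# directly on MIR and checks where VSETVLI/VSETIVLI instructions are inserted
# (or reused) around the vector pseudo instructions below.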
--- |
  ; ModuleID = 'vsetvli-insert.ll'
  source_filename = "vsetvli-insert.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  define <vscale x 1 x i64> @add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %a
  }

  define <vscale x 1 x i64> @load_add(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* %0, i64 %2)
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %b
  }

  define <vscale x 1 x i64> @load_zext(<vscale x 1 x i32>* %0, i64 %1) #0 {
  entry:
    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* %0, i64 %1)
    %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %1)
    ret <vscale x 1 x i64> %b
  }

  ; Function Attrs: nounwind readnone
  declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1

  define i64 @vmv_x_s(<vscale x 1 x i64> %0) #0 {
  entry:
    %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
    ret i64 %a
  }

  define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) #0 {
    %a = load <2 x i64>, <2 x i64>* %x, align 16
    %b = load <2 x i64>, <2 x i64>* %y, align 16
    %c = add <2 x i64> %a, %b
    store <2 x i64> %c, <2 x i64>* %x, align 16
    ret void
  }

  ; Function Attrs: nofree nosync nounwind readnone willreturn
  declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2

  define i64 @vreduce_add_v2i64(<2 x i64>* %x) #0 {
    %v = load <2 x i64>, <2 x i64>* %x, align 16
    %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
    ret i64 %red
  }

  ; Function Attrs: nounwind
  declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3

  define <vscale x 1 x i64> @vsetvli_add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
  entry:
    %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %a)
    ret <vscale x 1 x i64> %b
  }

  define <vscale x 1 x i64> @load_add_inlineasm(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* %0, i64 %2)
    call void asm sideeffect "", ""()
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %b
  }

  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

  ; Function Attrs: nounwind readonly
  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* nocapture, i64) #4

  ; Function Attrs: nounwind readonly
  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* nocapture, i64) #4

  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1

  attributes #0 = { "target-features"="+experimental-v" }
  attributes #1 = { nounwind readnone }
  attributes #2 = { nofree nosync nounwind readnone willreturn }
  attributes #3 = { nounwind }
  attributes #4 = { nounwind readonly }

...
---
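# A single vadd with a GPR AVL (%2): one vsetvli for e64/m1 is expected
# immediately before the PseudoVADD_VV_M1.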
name:            add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: gpr }
  - { id: 3, class: vr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
  - { reg: '$v9', virtual-reg: '%1' }
  - { reg: '$x10', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8, $v9, $x10

    ; CHECK-LABEL: name: add
    ; CHECK: liveins: $v8, $v9, $x10
    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
    %2:gpr = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
    %3:vr = PseudoVADD_VV_M1 %0, %1, %2, 6
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
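# The vle64 and the following vadd share the same SEW/LMUL and AVL, so a
# single vsetvli before the load should cover both instructions.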
name:            load_add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: gpr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v8', virtual-reg: '%1' }
  - { reg: '$x11', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: load_add
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x11
    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v8
    ; CHECK: [[COPY2:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
    %2:gpr = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 %0, %2, 6
    %4:vr = PseudoVADD_VV_M1 killed %3, %1, %2, 6
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
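# The e32/mf2 load and the e64/m1 zext need different vtype settings; the
# second vsetvli is expected to use the x0,x0 form so vl is kept while
# vtype changes.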
name:            load_zext
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $x11

    ; CHECK-LABEL: name: load_zext
    ; CHECK: liveins: $x10, $x11
    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x11
    ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 87, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5, implicit $vl, implicit $vtype
    ; CHECK: dead $x0 = PseudoVSETVLI killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
    ; CHECK: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY %3
    ; CHECK: PseudoRET implicit $v8
    %1:gpr = COPY $x11
    %0:gpr = COPY $x10
    %2:vr = PseudoVLE32_V_MF2 %0, %1, 5
    early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed %2, %1, 6
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
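# vmv.x.s only depends on vtype, not vl, so a vsetivli with a zero AVL is
# expected before the PseudoVMV_X_S_M1.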
name:            vmv_x_s
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: gpr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8

    ; CHECK-LABEL: name: vmv_x_s
    ; CHECK: liveins: $v8
    ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; CHECK: dead $x0 = PseudoVSETIVLI 0, 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6, implicit $vtype
    ; CHECK: $x10 = COPY [[PseudoVMV_X_S_M1_]]
    ; CHECK: PseudoRET implicit $x10
    %0:vr = COPY $v8
    %1:gpr = PseudoVMV_X_S_M1 %0, 6
    $x10 = COPY %1
    PseudoRET implicit $x10

...
---
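# Fixed-length <2 x i64> vectors: the AVL is the immediate 2, so one
# vsetivli should cover the two loads, the add, and the store.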
name:            add_v2i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10, $x11

    ; CHECK-LABEL: name: add_v2i64
    ; CHECK: liveins: $x10, $x11
    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x11
    ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
    ; CHECK: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6, implicit $vl, implicit $vtype
    ; CHECK: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
    ; CHECK: PseudoRET
    %1:gpr = COPY $x11
    %0:gpr = COPY $x10
    %2:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
    %3:vr = PseudoVLE64_V_M1 %1, 2, 6 :: (load (s128) from %ir.y)
    %4:vr = PseudoVADD_VV_M1 killed %2, killed %3, 2, 6
    PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x)
    PseudoRET

...
---
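# The vmv.v.i splat uses a VLMAX AVL ($x0) and so needs its own vsetvli;
# the reduction then switches back to an AVL of 2 with a vsetivli.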
name:            vreduce_add_v2i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
  - { id: 5, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10

    ; CHECK-LABEL: name: vreduce_add_v2i64
    ; CHECK: liveins: $x10
    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
    ; CHECK: dead %6:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6, implicit $vl, implicit $vtype
    ; CHECK: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6, implicit $vtype
    ; CHECK: $x10 = COPY [[PseudoVMV_X_S_M1_]]
    ; CHECK: PseudoRET implicit $x10
    %0:gpr = COPY $x10
    %1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
    %2:vr = PseudoVMV_V_I_M1 0, $x0, 6
    %4:vr = IMPLICIT_DEF
    %3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6
    %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
    $x10 = COPY %5
    PseudoRET implicit $x10

...
---
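# The input already contains an explicit PseudoVSETVLI feeding the vadd's
# AVL, so no additional vsetvli should be inserted.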
name:            vsetvli_add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: gpr }
  - { id: 3, class: gpr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
  - { reg: '$v9', virtual-reg: '%1' }
  - { reg: '$x10', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8, $v9, $x10

    ; CHECK-LABEL: name: vsetvli_add
    ; CHECK: liveins: $v8, $v9, $x10
    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gpr = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
    %2:gpr = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
    %3:gpr = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
    %4:vr = PseudoVADD_VV_M1 %0, %1, killed %3, 6
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
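# The inline asm is not assumed to preserve vl/vtype, so the vsetvli state
# is re-established after the INLINEASM before the vadd.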
name:            load_add_inlineasm
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: gpr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v8', virtual-reg: '%1' }
  - { reg: '$x11', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: load_add_inlineasm
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x11
    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v8
    ; CHECK: [[COPY2:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
    ; CHECK: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
    %2:gpr = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 %0, %2, 6
    INLINEASM &"", 1 /* sideeffect attdialect */
    %4:vr = PseudoVADD_VV_M1 killed %3, %1, %2, 6
    $v8 = COPY %4
    PseudoRET implicit $v8

...