; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test load/save vector element intrinsic instructions
;;;
;;; Note:
;;;   We test LSVrr_v and LVSvr instructions.

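; lsv_vvss loads 256 doubles with vld (lea/lvl set the vector length to
; 256), inserts the 64-bit scalar %1 at element index %2 with lsv
; (LSVrr_v), and stores the vector back with vst.  The
; `and %s2, %s2, (32)0` masks the signext i32 index to its low 32 bits
; ((32)0 is 32 zeros followed by 32 ones) before it selects the element.
; Roughly the generic-IR equivalent of the lsv.vvss step would be:
;   %d = bitcast i64 %val to double
;   %r = insertelement <256 x double> %v, double %d, i32 %idx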
; Function Attrs: nounwind
define void @lsv_vvss(i8* %0, i64 %1, i32 signext %2) {
; CHECK-LABEL: lsv_vvss:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s2, %s2, (32)0
; CHECK-NEXT:    lsv %v0(%s2), %s1
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double> %4, i32 %2, i64 %1)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %0, i32 256)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double>, i32, i64)

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)

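; lvsl_vssl_imm reads element %1 of the loaded vector into a scalar i64
; with lvs (LVSvr).  Roughly the generic-IR equivalent would be:
;   %e = extractelement <256 x double> %v, i32 %idx
;   %r = bitcast double %e to i64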
; Function Attrs: nounwind readonly
define i64 @lvsl_vssl_imm(i8* readonly %0, i32 signext %1) {
; CHECK-LABEL: lvsl_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s0, %s1, (32)0
; CHECK-NEXT:    lvs %s0, %v0(%s0)
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  %4 = tail call i64 @llvm.ve.vl.lvsl.svs(<256 x double> %3, i32 %1)
  ret i64 %4
}

; Function Attrs: nounwind readnone
declare i64 @llvm.ve.vl.lvsl.svs(<256 x double>, i32)

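; lvsd_vssl_imm is the same LVSvr read, but the element is returned
; directly as a double (a plain extractelement in generic-IR terms), so
; no bitcast is needed on the IR side.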
; Function Attrs: nounwind readonly
define double @lvsd_vssl_imm(i8* readonly %0, i32 signext %1) {
; CHECK-LABEL: lvsd_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s0, %s1, (32)0
; CHECK-NEXT:    lvs %s0, %v0(%s0)
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  %4 = tail call fast double @llvm.ve.vl.lvsd.svs(<256 x double> %3, i32 %1)
  ret double %4
}

; Function Attrs: nounwind readnone
declare double @llvm.ve.vl.lvsd.svs(<256 x double>, i32)

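; lvss_vssl_imm returns the element as an f32.  On VE, scalar floats live
; in the upper 32 bits of a 64-bit register, so lvss presumably hands back
; the upper half of the selected element reinterpreted as float; the
; expected asm is identical to the i64/f64 cases since lvs moves the
; whole element.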
; Function Attrs: nounwind readonly
define float @lvss_vssl_imm(i8* readonly %0, i32 signext %1) {
; CHECK-LABEL: lvss_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s0, %s1, (32)0
; CHECK-NEXT:    lvs %s0, %v0(%s0)
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  %4 = tail call fast float @llvm.ve.vl.lvss.svs(<256 x double> %3, i32 %1)
  ret float %4
}

; Function Attrs: nounwind readnone
declare float @llvm.ve.vl.lvss.svs(<256 x double>, i32)