// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
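
// vcompress.vm packs the elements of `src` whose corresponding bit in `mask`
// is set into contiguous low-numbered elements of the result; element
// positions past the packed ones are taken from `dest`, and `vl` bounds the
// number of elements processed. Each test below checks that the
// type-overloaded `vcompress` builtin lowers to the matching
// @llvm.riscv.vcompress.* intrinsic for that element type and LMUL.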

// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vcompress_vm_i64m4 (vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vcompress_vm_u8m1 (vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) {
  return vcompress(mask, dest, src, vl);
}
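
// A hedged usage sketch (not checked by this file): packing the elements of
// an i32m1 vector selected by a comparison mask. The `vmsgt` overload used to
// build the mask is an assumption about this version of the intrinsic API and
// is not exercised above.
//
//   vbool32_t m = vmsgt(v, 0, vl);              // assumed overload: m[i] = v[i] > 0
//   vint32m1_t packed = vcompress(m, v, v, vl); // actives packed low, rest from v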