// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

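// The checks below verify that each vneg_v_* intrinsic (and its masked _m
// variant) is lowered to the corresponding @llvm.riscv.vrsub intrinsic with a
// zero scalar operand, i.e. vector negation is emitted as a reverse subtract
// from zero.
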
//
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
  return vneg_v_i8mf8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
  return vneg_v_i8mf4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
  return vneg_v_i8mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
  return vneg_v_i8m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
  return vneg_v_i8m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
  return vneg_v_i8m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
  return vneg_v_i8m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
  return vneg_v_i16mf4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
  return vneg_v_i16mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
  return vneg_v_i16m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
  return vneg_v_i16m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
  return vneg_v_i16m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
  return vneg_v_i16m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
  return vneg_v_i32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
  return vneg_v_i32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
  return vneg_v_i32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
  return vneg_v_i32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
  return vneg_v_i32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
  return vneg_v_i64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
  return vneg_v_i64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
  return vneg_v_i64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
  return vneg_v_i64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
  return vneg_v_i8mf8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
  return vneg_v_i8mf4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
  return vneg_v_i8mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
  return vneg_v_i8m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
  return vneg_v_i8m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
  return vneg_v_i8m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 0, <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
  return vneg_v_i8m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
  return vneg_v_i16mf4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
  return vneg_v_i16mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
  return vneg_v_i16m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
  return vneg_v_i16m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
  return vneg_v_i16m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
  return vneg_v_i16m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
  return vneg_v_i32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
  return vneg_v_i32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
  return vneg_v_i32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
  return vneg_v_i32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
  return vneg_v_i32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
  return vneg_v_i64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
  return vneg_v_i64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
  return vneg_v_i64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vneg_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
  return vneg_v_i64m8_m(mask, maskedoff, op1, vl);
}