// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

1033 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
1034 // CHECK-RV64-NEXT:  entry:
1035 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1036 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1037 //
vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1041 
1042 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
1043 // CHECK-RV64-NEXT:  entry:
1044 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1045 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1046 //
vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1050 
1051 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
1052 // CHECK-RV64-NEXT:  entry:
1053 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1054 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1055 //
vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1059 
1060 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
1061 // CHECK-RV64-NEXT:  entry:
1062 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1063 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1064 //
vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1068 
1069 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
1070 // CHECK-RV64-NEXT:  entry:
1071 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1072 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1073 //
vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1077 
1078 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
1079 // CHECK-RV64-NEXT:  entry:
1080 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1081 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1082 //
vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1086 
1087 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
1088 // CHECK-RV64-NEXT:  entry:
1089 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1090 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1091 //
vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1095 
1096 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
1097 // CHECK-RV64-NEXT:  entry:
1098 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1099 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1100 //
vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1104 
1105 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
1106 // CHECK-RV64-NEXT:  entry:
1107 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1108 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1109 //
vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1113 
1114 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
1115 // CHECK-RV64-NEXT:  entry:
1116 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1117 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1118 //
vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1122 
1123 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
1124 // CHECK-RV64-NEXT:  entry:
1125 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1126 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1127 //
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1131 
1132 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
1133 // CHECK-RV64-NEXT:  entry:
1134 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1135 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1136 //
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1140 
1141 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
1142 // CHECK-RV64-NEXT:  entry:
1143 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1144 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1145 //
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1149 
1150 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
1151 // CHECK-RV64-NEXT:  entry:
1152 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1153 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1154 //
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1158 
1159 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
1160 // CHECK-RV64-NEXT:  entry:
1161 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1162 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1163 //
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1167 
1168 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
1169 // CHECK-RV64-NEXT:  entry:
1170 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1171 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1172 //
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1176 
1177 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
1178 // CHECK-RV64-NEXT:  entry:
1179 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1180 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1181 //
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1185 
1186 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
1187 // CHECK-RV64-NEXT:  entry:
1188 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1189 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1190 //
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}
1194 
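// The vmulhu tests below cover the unsigned variant: vmulhu returns the high
// half of the (unsigned x unsigned) product of op1 and op2.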
1195 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
1196 // CHECK-RV64-NEXT:  entry:
1197 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1198 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1199 //
vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1203 
1204 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
1205 // CHECK-RV64-NEXT:  entry:
1206 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1207 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1208 //
vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1212 
1213 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
1214 // CHECK-RV64-NEXT:  entry:
1215 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1216 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1217 //
vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1221 
1222 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
1223 // CHECK-RV64-NEXT:  entry:
1224 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1225 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1226 //
vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1230 
1231 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
1232 // CHECK-RV64-NEXT:  entry:
1233 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1234 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1235 //
vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1239 
1240 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
1241 // CHECK-RV64-NEXT:  entry:
1242 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1243 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1244 //
vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1248 
1249 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
1250 // CHECK-RV64-NEXT:  entry:
1251 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1252 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1253 //
vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1257 
1258 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
1259 // CHECK-RV64-NEXT:  entry:
1260 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1261 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1262 //
vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1266 
1267 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
1268 // CHECK-RV64-NEXT:  entry:
1269 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1270 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1271 //
vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1275 
1276 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
1277 // CHECK-RV64-NEXT:  entry:
1278 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1279 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1280 //
vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1284 
1285 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
1286 // CHECK-RV64-NEXT:  entry:
1287 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1288 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1289 //
vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1293 
1294 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
1295 // CHECK-RV64-NEXT:  entry:
1296 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1297 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1298 //
vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1302 
1303 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
1304 // CHECK-RV64-NEXT:  entry:
1305 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1306 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1307 //
vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1311 
1312 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
1313 // CHECK-RV64-NEXT:  entry:
1314 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1315 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1316 //
vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1320 
1321 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
1322 // CHECK-RV64-NEXT:  entry:
1323 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1324 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1325 //
vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1329 
1330 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
1331 // CHECK-RV64-NEXT:  entry:
1332 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1333 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1334 //
vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1338 
1339 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
1340 // CHECK-RV64-NEXT:  entry:
1341 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1342 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1343 //
vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1347 
1348 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
1349 // CHECK-RV64-NEXT:  entry:
1350 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1351 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1352 //
vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1356 
1357 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
1358 // CHECK-RV64-NEXT:  entry:
1359 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1360 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1361 //
vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1365 
1366 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
1367 // CHECK-RV64-NEXT:  entry:
1368 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1369 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1370 //
vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1374 
1375 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
1376 // CHECK-RV64-NEXT:  entry:
1377 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1378 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1379 //
vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1383 
1384 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
1385 // CHECK-RV64-NEXT:  entry:
1386 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1387 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1388 //
vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1392 
1393 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
1394 // CHECK-RV64-NEXT:  entry:
1395 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1396 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1397 //
vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1401 
1402 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
1403 // CHECK-RV64-NEXT:  entry:
1404 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1405 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1406 //
vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1410 
1411 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
1412 // CHECK-RV64-NEXT:  entry:
1413 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1414 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1415 //
vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1419 
1420 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
1421 // CHECK-RV64-NEXT:  entry:
1422 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1423 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1424 //
vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1428 
1429 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
1430 // CHECK-RV64-NEXT:  entry:
1431 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1432 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1433 //
vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1437 
1438 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
1439 // CHECK-RV64-NEXT:  entry:
1440 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1441 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1442 //
vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1446 
1447 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
1448 // CHECK-RV64-NEXT:  entry:
1449 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1450 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1451 //
vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1455 
1456 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
1457 // CHECK-RV64-NEXT:  entry:
1458 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1459 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1460 //
vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1464 
1465 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
1466 // CHECK-RV64-NEXT:  entry:
1467 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1468 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1469 //
vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1473 
1474 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
1475 // CHECK-RV64-NEXT:  entry:
1476 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1477 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1478 //
vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1482 
1483 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
1484 // CHECK-RV64-NEXT:  entry:
1485 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1486 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1487 //
vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1491 
1492 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
1493 // CHECK-RV64-NEXT:  entry:
1494 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1495 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1496 //
vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1500 
1501 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
1502 // CHECK-RV64-NEXT:  entry:
1503 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1504 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1505 //
vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1509 
1510 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
1511 // CHECK-RV64-NEXT:  entry:
1512 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1513 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1514 //
vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1518 
1519 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
1520 // CHECK-RV64-NEXT:  entry:
1521 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1522 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1523 //
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1527 
1528 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
1529 // CHECK-RV64-NEXT:  entry:
1530 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1531 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1532 //
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1536 
1537 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
1538 // CHECK-RV64-NEXT:  entry:
1539 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1540 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1541 //
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1545 
1546 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
1547 // CHECK-RV64-NEXT:  entry:
1548 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1549 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1550 //
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1554 
1555 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
1556 // CHECK-RV64-NEXT:  entry:
1557 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1558 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1559 //
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1563 
1564 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
1565 // CHECK-RV64-NEXT:  entry:
1566 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1567 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1568 //
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1572 
1573 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
1574 // CHECK-RV64-NEXT:  entry:
1575 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1576 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1577 //
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1581 
1582 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
1583 // CHECK-RV64-NEXT:  entry:
1584 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1585 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1586 //
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}
1590 
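// The vmulhsu tests below cover the mixed-sign variant: vmulhsu returns the
// high half of the product of a signed op1 and an unsigned op2.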
1591 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
1592 // CHECK-RV64-NEXT:  entry:
1593 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1594 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1595 //
vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1599 
1600 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
1601 // CHECK-RV64-NEXT:  entry:
1602 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1603 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1604 //
vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1608 
1609 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
1610 // CHECK-RV64-NEXT:  entry:
1611 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1612 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1613 //
vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1617 
1618 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
1619 // CHECK-RV64-NEXT:  entry:
1620 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1621 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1622 //
vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1626 
1627 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
1628 // CHECK-RV64-NEXT:  entry:
1629 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1630 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1631 //
vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1635 
1636 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
1637 // CHECK-RV64-NEXT:  entry:
1638 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1639 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1640 //
vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1644 
1645 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
1646 // CHECK-RV64-NEXT:  entry:
1647 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1648 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1649 //
vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1653 
1654 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
1655 // CHECK-RV64-NEXT:  entry:
1656 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1657 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1658 //
vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1662 
1663 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
1664 // CHECK-RV64-NEXT:  entry:
1665 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1666 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1667 //
vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1671 
1672 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
1673 // CHECK-RV64-NEXT:  entry:
1674 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1675 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1676 //
vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1680 
1681 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
1682 // CHECK-RV64-NEXT:  entry:
1683 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1684 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1685 //
vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1689 
1690 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
1691 // CHECK-RV64-NEXT:  entry:
1692 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1693 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1694 //
vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1698 
1699 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
1700 // CHECK-RV64-NEXT:  entry:
1701 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1702 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1703 //
vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1707 
1708 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
1709 // CHECK-RV64-NEXT:  entry:
1710 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1711 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1712 //
vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1716 
1717 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
1718 // CHECK-RV64-NEXT:  entry:
1719 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1720 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1721 //
vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1725 
1726 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
1727 // CHECK-RV64-NEXT:  entry:
1728 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1729 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1730 //
vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1734 
1735 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
1736 // CHECK-RV64-NEXT:  entry:
1737 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1738 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1739 //
vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1743 
1744 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
1745 // CHECK-RV64-NEXT:  entry:
1746 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1747 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1748 //
vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1752 
1753 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
1754 // CHECK-RV64-NEXT:  entry:
1755 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1756 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1757 //
vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1761 
1762 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
1763 // CHECK-RV64-NEXT:  entry:
1764 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1765 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1766 //
vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1770 
1771 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
1772 // CHECK-RV64-NEXT:  entry:
1773 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1774 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1775 //
vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1779 
1780 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
1781 // CHECK-RV64-NEXT:  entry:
1782 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1783 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1784 //
vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1788 
1789 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
1790 // CHECK-RV64-NEXT:  entry:
1791 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1792 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1793 //
vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1797 
1798 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
1799 // CHECK-RV64-NEXT:  entry:
1800 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1801 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1802 //
vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1806 
1807 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
1808 // CHECK-RV64-NEXT:  entry:
1809 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1810 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1811 //
vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1815 
1816 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
1817 // CHECK-RV64-NEXT:  entry:
1818 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1819 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1820 //
vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1824 
1825 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
1826 // CHECK-RV64-NEXT:  entry:
1827 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1828 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1829 //
vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1833 
1834 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
1835 // CHECK-RV64-NEXT:  entry:
1836 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1837 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1838 //
vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1842 
1843 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
1844 // CHECK-RV64-NEXT:  entry:
1845 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1846 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1847 //
vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1851 
1852 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
1853 // CHECK-RV64-NEXT:  entry:
1854 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1855 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1856 //
vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1860 
1861 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
1862 // CHECK-RV64-NEXT:  entry:
1863 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1864 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1865 //
vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1869 
1870 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
1871 // CHECK-RV64-NEXT:  entry:
1872 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1873 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1874 //
vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1878 
1879 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
1880 // CHECK-RV64-NEXT:  entry:
1881 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1882 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1883 //
vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1887 
1888 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
1889 // CHECK-RV64-NEXT:  entry:
1890 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1891 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1892 //
vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1896 
1897 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
1898 // CHECK-RV64-NEXT:  entry:
1899 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1900 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1901 //
vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1905 
1906 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
1907 // CHECK-RV64-NEXT:  entry:
1908 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1909 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1910 //
vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1914 
1915 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
1916 // CHECK-RV64-NEXT:  entry:
1917 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1918 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1919 //
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1923 
1924 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
1925 // CHECK-RV64-NEXT:  entry:
1926 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1927 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1928 //
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1932 
1933 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
1934 // CHECK-RV64-NEXT:  entry:
1935 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1936 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1937 //
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1941 
1942 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
1943 // CHECK-RV64-NEXT:  entry:
1944 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1945 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1946 //
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1950 
1951 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
1952 // CHECK-RV64-NEXT:  entry:
1953 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1954 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1955 //
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1959 
1960 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
1961 // CHECK-RV64-NEXT:  entry:
1962 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1963 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1964 //
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1968 
1969 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
1970 // CHECK-RV64-NEXT:  entry:
1971 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1972 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1973 //
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1977 
1978 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
1979 // CHECK-RV64-NEXT:  entry:
1980 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1981 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1982 //
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}
1986 
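// The _m tests below exercise the masked variants, which additionally take a
// mask and a maskedoff vector; elements where the mask is clear are taken
// from maskedoff.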
1987 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
1988 // CHECK-RV64-NEXT:  entry:
1989 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1990 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1991 //
vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vmul(mask, maskedoff, op1, op2, vl);
}
1995 
1996 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
1997 // CHECK-RV64-NEXT:  entry:
1998 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1999 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
2000 //
vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
  return vmul(mask, maskedoff, op1, op2, vl);
}
2004 
2005 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
2006 // CHECK-RV64-NEXT:  entry:
2007 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2008 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2009 //
vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vmul(mask, maskedoff, op1, op2, vl);
}
2013 
2014 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
2015 // CHECK-RV64-NEXT:  entry:
2016 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2017 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2018 //
2019 vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
2020   return vmul(mask, maskedoff, op1, op2, vl);
2021 }
2022 
2023 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
2024 // CHECK-RV64-NEXT:  entry:
2025 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2026 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2027 //
2028 vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
2029   return vmul(mask, maskedoff, op1, op2, vl);
2030 }
2031 
2032 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
2033 // CHECK-RV64-NEXT:  entry:
2034 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2035 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2036 //
2037 vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
2038   return vmul(mask, maskedoff, op1, op2, vl);
2039 }
2040 
2041 // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
2042 // CHECK-RV64-NEXT:  entry:
2043 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2044 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2045 //
2046 vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
2047   return vmul(mask, maskedoff, op1, op2, vl);
2048 }
2049 
2050 // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
2051 // CHECK-RV64-NEXT:  entry:
2052 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2053 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2054 //
2055 vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
2056   return vmul(mask, maskedoff, op1, op2, vl);
2057 }
2058 
2059 // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
2060 // CHECK-RV64-NEXT:  entry:
2061 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2062 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2063 //
2064 vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
2065   return vmul(mask, maskedoff, op1, op2, vl);
2066 }
2067 
2068 // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
2069 // CHECK-RV64-NEXT:  entry:
2070 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2071 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2072 //
2073 vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
2074   return vmul(mask, maskedoff, op1, op2, vl);
2075 }
2076 
2077 // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
2078 // CHECK-RV64-NEXT:  entry:
2079 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2080 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2081 //
2082 vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
2083   return vmul(mask, maskedoff, op1, op2, vl);
2084 }
2085 
2086 // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
2087 // CHECK-RV64-NEXT:  entry:
2088 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2089 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2090 //
2091 vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
2092   return vmul(mask, maskedoff, op1, op2, vl);
2093 }
2094 
2095 // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
2096 // CHECK-RV64-NEXT:  entry:
2097 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2098 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2099 //
2100 vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
2101   return vmul(mask, maskedoff, op1, op2, vl);
2102 }
2103 
2104 // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
2105 // CHECK-RV64-NEXT:  entry:
2106 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2107 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2108 //
2109 vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
2110   return vmul(mask, maskedoff, op1, op2, vl);
2111 }
2112 
2113 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
2114 // CHECK-RV64-NEXT:  entry:
2115 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2116 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2117 //
2118 vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
2119   return vmul(mask, maskedoff, op1, op2, vl);
2120 }
2121 
2122 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
2123 // CHECK-RV64-NEXT:  entry:
2124 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2125 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2126 //
2127 vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
2128   return vmul(mask, maskedoff, op1, op2, vl);
2129 }
2130 
2131 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
2132 // CHECK-RV64-NEXT:  entry:
2133 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2134 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2135 //
2136 vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
2137   return vmul(mask, maskedoff, op1, op2, vl);
2138 }
2139 
2140 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
2141 // CHECK-RV64-NEXT:  entry:
2142 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2143 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2144 //
2145 vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
2146   return vmul(mask, maskedoff, op1, op2, vl);
2147 }
2148 
2149 // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
2150 // CHECK-RV64-NEXT:  entry:
2151 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2152 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2153 //
2154 vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
2155   return vmul(mask, maskedoff, op1, op2, vl);
2156 }
2157 
2158 // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
2159 // CHECK-RV64-NEXT:  entry:
2160 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2161 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2162 //
2163 vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
2164   return vmul(mask, maskedoff, op1, op2, vl);
2165 }
2166 
2167 // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
2168 // CHECK-RV64-NEXT:  entry:
2169 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2170 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2171 //
2172 vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
2173   return vmul(mask, maskedoff, op1, op2, vl);
2174 }
2175 
2176 // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
2177 // CHECK-RV64-NEXT:  entry:
2178 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2179 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2180 //
2181 vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
2182   return vmul(mask, maskedoff, op1, op2, vl);
2183 }
2184 
2185 // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
2186 // CHECK-RV64-NEXT:  entry:
2187 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2188 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2189 //
2190 vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
2191   return vmul(mask, maskedoff, op1, op2, vl);
2192 }
2193 
2194 // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
2195 // CHECK-RV64-NEXT:  entry:
2196 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2197 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2198 //
2199 vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
2200   return vmul(mask, maskedoff, op1, op2, vl);
2201 }
2202 
2203 // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
2204 // CHECK-RV64-NEXT:  entry:
2205 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2206 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
2207 //
2208 vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
2209   return vmul(mask, maskedoff, op1, op2, vl);
2210 }
2211 
2212 // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
2213 // CHECK-RV64-NEXT:  entry:
2214 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2215 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
2216 //
2217 vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
2218   return vmul(mask, maskedoff, op1, op2, vl);
2219 }
2220 
2221 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
2222 // CHECK-RV64-NEXT:  entry:
2223 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2224 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
2225 //
2226 vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
2227   return vmul(mask, maskedoff, op1, op2, vl);
2228 }
2229 
2230 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
2231 // CHECK-RV64-NEXT:  entry:
2232 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2233 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
2234 //
2235 vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
2236   return vmul(mask, maskedoff, op1, op2, vl);
2237 }
2238 
2239 // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
2240 // CHECK-RV64-NEXT:  entry:
2241 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2242 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
2243 //
2244 vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
2245   return vmul(mask, maskedoff, op1, op2, vl);
2246 }
2247 
2248 // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
2249 // CHECK-RV64-NEXT:  entry:
2250 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2251 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
2252 //
2253 vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
2254   return vmul(mask, maskedoff, op1, op2, vl);
2255 }
2256 
2257 // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
2258 // CHECK-RV64-NEXT:  entry:
2259 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2260 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
2261 //
2262 vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
2263   return vmul(mask, maskedoff, op1, op2, vl);
2264 }
2265 
2266 // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
2267 // CHECK-RV64-NEXT:  entry:
2268 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2269 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
2270 //
2271 vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
2272   return vmul(mask, maskedoff, op1, op2, vl);
2273 }
2274 
2275 // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
2276 // CHECK-RV64-NEXT:  entry:
2277 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2278 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
2279 //
2280 vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
2281   return vmul(mask, maskedoff, op1, op2, vl);
2282 }
2283 
2284 // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
2285 // CHECK-RV64-NEXT:  entry:
2286 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2287 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
2288 //
2289 vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
2290   return vmul(mask, maskedoff, op1, op2, vl);
2291 }
2292 
2293 // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
2294 // CHECK-RV64-NEXT:  entry:
2295 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2296 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
2297 //
2298 vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
2299   return vmul(mask, maskedoff, op1, op2, vl);
2300 }
2301 
2302 // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
2303 // CHECK-RV64-NEXT:  entry:
2304 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2305 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
2306 //
2307 vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
2308   return vmul(mask, maskedoff, op1, op2, vl);
2309 }
2310 
2311 // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
2312 // CHECK-RV64-NEXT:  entry:
2313 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2314 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
2315 //
2316 vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
2317   return vmul(mask, maskedoff, op1, op2, vl);
2318 }
2319 
2320 // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
2321 // CHECK-RV64-NEXT:  entry:
2322 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2323 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
2324 //
2325 vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
2326   return vmul(mask, maskedoff, op1, op2, vl);
2327 }
2328 
2329 // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
2330 // CHECK-RV64-NEXT:  entry:
2331 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2332 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
2333 //
2334 vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
2335   return vmul(mask, maskedoff, op1, op2, vl);
2336 }
2337 
2338 // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
2339 // CHECK-RV64-NEXT:  entry:
2340 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2341 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
2342 //
2343 vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
2344   return vmul(mask, maskedoff, op1, op2, vl);
2345 }
2346 
2347 // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
2348 // CHECK-RV64-NEXT:  entry:
2349 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2350 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
2351 //
2352 vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
2353   return vmul(mask, maskedoff, op1, op2, vl);
2354 }
2355 
2356 // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
2357 // CHECK-RV64-NEXT:  entry:
2358 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2359 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
2360 //
2361 vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
2362   return vmul(mask, maskedoff, op1, op2, vl);
2363 }
2364 
2365 // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
2366 // CHECK-RV64-NEXT:  entry:
2367 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2368 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
2369 //
2370 vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
2371   return vmul(mask, maskedoff, op1, op2, vl);
2372 }
2373 
2374 // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
2375 // CHECK-RV64-NEXT:  entry:
2376 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2377 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
2378 //
2379 vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
2380   return vmul(mask, maskedoff, op1, op2, vl);
2381 }
2382 
2383 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
2384 // CHECK-RV64-NEXT:  entry:
2385 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2386 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
2387 //
2388 vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
2389   return vmul(mask, maskedoff, op1, op2, vl);
2390 }
2391 
2392 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
2393 // CHECK-RV64-NEXT:  entry:
2394 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2395 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
2396 //
2397 vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
2398   return vmul(mask, maskedoff, op1, op2, vl);
2399 }
2400 
2401 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
2402 // CHECK-RV64-NEXT:  entry:
2403 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2404 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2405 //
2406 vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
2407   return vmul(mask, maskedoff, op1, op2, vl);
2408 }
2409 
2410 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
2411 // CHECK-RV64-NEXT:  entry:
2412 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2413 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2414 //
2415 vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
2416   return vmul(mask, maskedoff, op1, op2, vl);
2417 }
2418 
2419 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
2420 // CHECK-RV64-NEXT:  entry:
2421 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2422 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2423 //
2424 vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
2425   return vmul(mask, maskedoff, op1, op2, vl);
2426 }
2427 
2428 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
2429 // CHECK-RV64-NEXT:  entry:
2430 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2431 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2432 //
2433 vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
2434   return vmul(mask, maskedoff, op1, op2, vl);
2435 }
2436 
2437 // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
2438 // CHECK-RV64-NEXT:  entry:
2439 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2440 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2441 //
2442 vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
2443   return vmul(mask, maskedoff, op1, op2, vl);
2444 }
2445 
2446 // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
2447 // CHECK-RV64-NEXT:  entry:
2448 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2449 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2450 //
2451 vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
2452   return vmul(mask, maskedoff, op1, op2, vl);
2453 }
2454 
2455 // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
2456 // CHECK-RV64-NEXT:  entry:
2457 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2458 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2459 //
2460 vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
2461   return vmul(mask, maskedoff, op1, op2, vl);
2462 }
2463 
2464 // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
2465 // CHECK-RV64-NEXT:  entry:
2466 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2467 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2468 //
2469 vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
2470   return vmul(mask, maskedoff, op1, op2, vl);
2471 }
2472 
2473 // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
2474 // CHECK-RV64-NEXT:  entry:
2475 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2476 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2477 //
2478 vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
2479   return vmul(mask, maskedoff, op1, op2, vl);
2480 }
2481 
2482 // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
2483 // CHECK-RV64-NEXT:  entry:
2484 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2485 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2486 //
2487 vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
2488   return vmul(mask, maskedoff, op1, op2, vl);
2489 }
2490 
2491 // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
2492 // CHECK-RV64-NEXT:  entry:
2493 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2494 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2495 //
2496 vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
2497   return vmul(mask, maskedoff, op1, op2, vl);
2498 }
2499 
2500 // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
2501 // CHECK-RV64-NEXT:  entry:
2502 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2503 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2504 //
2505 vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
2506   return vmul(mask, maskedoff, op1, op2, vl);
2507 }
2508 
2509 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
2510 // CHECK-RV64-NEXT:  entry:
2511 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2512 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2513 //
2514 vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
2515   return vmul(mask, maskedoff, op1, op2, vl);
2516 }
2517 
2518 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
2519 // CHECK-RV64-NEXT:  entry:
2520 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2521 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2522 //
2523 vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
2524   return vmul(mask, maskedoff, op1, op2, vl);
2525 }
2526 
2527 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
2528 // CHECK-RV64-NEXT:  entry:
2529 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2530 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2531 //
2532 vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
2533   return vmul(mask, maskedoff, op1, op2, vl);
2534 }
2535 
2536 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
2537 // CHECK-RV64-NEXT:  entry:
2538 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2539 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2540 //
2541 vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
2542   return vmul(mask, maskedoff, op1, op2, vl);
2543 }
2544 
2545 // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
2546 // CHECK-RV64-NEXT:  entry:
2547 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2548 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2549 //
2550 vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
2551   return vmul(mask, maskedoff, op1, op2, vl);
2552 }
2553 
2554 // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
2555 // CHECK-RV64-NEXT:  entry:
2556 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2557 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2558 //
2559 vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
2560   return vmul(mask, maskedoff, op1, op2, vl);
2561 }
2562 
2563 // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
2564 // CHECK-RV64-NEXT:  entry:
2565 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2566 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2567 //
2568 vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
2569   return vmul(mask, maskedoff, op1, op2, vl);
2570 }
2571 
2572 // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
2573 // CHECK-RV64-NEXT:  entry:
2574 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2575 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2576 //
2577 vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
2578   return vmul(mask, maskedoff, op1, op2, vl);
2579 }
2580 
2581 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
2582 // CHECK-RV64-NEXT:  entry:
2583 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2584 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2585 //
2586 vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
2587   return vmul(mask, maskedoff, op1, op2, vl);
2588 }
2589 
2590 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
2591 // CHECK-RV64-NEXT:  entry:
2592 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2593 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2594 //
2595 vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
2596   return vmul(mask, maskedoff, op1, op2, vl);
2597 }
2598 
2599 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
2600 // CHECK-RV64-NEXT:  entry:
2601 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2602 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
2603 //
2604 vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
2605   return vmul(mask, maskedoff, op1, op2, vl);
2606 }
2607 
2608 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
2609 // CHECK-RV64-NEXT:  entry:
2610 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2611 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
2612 //
2613 vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
2614   return vmul(mask, maskedoff, op1, op2, vl);
2615 }
2616 
2617 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
2618 // CHECK-RV64-NEXT:  entry:
2619 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2620 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
2621 //
2622 vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
2623   return vmul(mask, maskedoff, op1, op2, vl);
2624 }
2625 
2626 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
2627 // CHECK-RV64-NEXT:  entry:
2628 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2629 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
2630 //
2631 vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
2632   return vmul(mask, maskedoff, op1, op2, vl);
2633 }
2634 
2635 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
2636 // CHECK-RV64-NEXT:  entry:
2637 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2638 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
2639 //
2640 vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
2641   return vmul(mask, maskedoff, op1, op2, vl);
2642 }
2643 
2644 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
2645 // CHECK-RV64-NEXT:  entry:
2646 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2647 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
2648 //
2649 vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
2650   return vmul(mask, maskedoff, op1, op2, vl);
2651 }
2652 
2653 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
2654 // CHECK-RV64-NEXT:  entry:
2655 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2656 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
2657 //
2658 vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
2659   return vmul(mask, maskedoff, op1, op2, vl);
2660 }
2661 
2662 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
2663 // CHECK-RV64-NEXT:  entry:
2664 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2665 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
2666 //
2667 vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
2668   return vmul(mask, maskedoff, op1, op2, vl);
2669 }
2670 
2671 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
2672 // CHECK-RV64-NEXT:  entry:
2673 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2674 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
2675 //
2676 vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
2677   return vmul(mask, maskedoff, op1, op2, vl);
2678 }
2679 
2680 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
2681 // CHECK-RV64-NEXT:  entry:
2682 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2683 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
2684 //
2685 vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
2686   return vmul(mask, maskedoff, op1, op2, vl);
2687 }
2688 
2689 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
2690 // CHECK-RV64-NEXT:  entry:
2691 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2692 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
2693 //
2694 vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
2695   return vmul(mask, maskedoff, op1, op2, vl);
2696 }
2697 
2698 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
2699 // CHECK-RV64-NEXT:  entry:
2700 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2701 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
2702 //
2703 vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
2704   return vmul(mask, maskedoff, op1, op2, vl);
2705 }
2706 
2707 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
2708 // CHECK-RV64-NEXT:  entry:
2709 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2710 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
2711 //
2712 vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
2713   return vmul(mask, maskedoff, op1, op2, vl);
2714 }
2715 
2716 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
2717 // CHECK-RV64-NEXT:  entry:
2718 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2719 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
2720 //
2721 vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
2722   return vmul(mask, maskedoff, op1, op2, vl);
2723 }
2724 
2725 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
2726 // CHECK-RV64-NEXT:  entry:
2727 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2728 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
2729 //
2730 vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
2731   return vmul(mask, maskedoff, op1, op2, vl);
2732 }
2733 
2734 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
2735 // CHECK-RV64-NEXT:  entry:
2736 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2737 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
2738 //
2739 vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
2740   return vmul(mask, maskedoff, op1, op2, vl);
2741 }
2742 
2743 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
2744 // CHECK-RV64-NEXT:  entry:
2745 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2746 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
2747 //
2748 vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
2749   return vmul(mask, maskedoff, op1, op2, vl);
2750 }
2751 
2752 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
2753 // CHECK-RV64-NEXT:  entry:
2754 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2755 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
2756 //
2757 vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
2758   return vmul(mask, maskedoff, op1, op2, vl);
2759 }
2760 
2761 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
2762 // CHECK-RV64-NEXT:  entry:
2763 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2764 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
2765 //
2766 vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
2767   return vmul(mask, maskedoff, op1, op2, vl);
2768 }
2769 
2770 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
2771 // CHECK-RV64-NEXT:  entry:
2772 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2773 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
2774 //
2775 vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
2776   return vmul(mask, maskedoff, op1, op2, vl);
2777 }
2778 
2779 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
2780 // CHECK-RV64-NEXT:  entry:
2781 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2782 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
2783 //
2784 vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
2785   return vmulh(mask, maskedoff, op1, op2, vl);
2786 }
2787 
2788 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
2789 // CHECK-RV64-NEXT:  entry:
2790 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2791 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
2792 //
2793 vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
2794   return vmulh(mask, maskedoff, op1, op2, vl);
2795 }
2796 
2797 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
2798 // CHECK-RV64-NEXT:  entry:
2799 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2800 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2801 //
2802 vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
2803   return vmulh(mask, maskedoff, op1, op2, vl);
2804 }
2805 
2806 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
2807 // CHECK-RV64-NEXT:  entry:
2808 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2809 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2810 //
2811 vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
2812   return vmulh(mask, maskedoff, op1, op2, vl);
2813 }
2814 
2815 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
2816 // CHECK-RV64-NEXT:  entry:
2817 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2818 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2819 //
2820 vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
2821   return vmulh(mask, maskedoff, op1, op2, vl);
2822 }
2823 
2824 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
2825 // CHECK-RV64-NEXT:  entry:
2826 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2827 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2828 //
2829 vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
2830   return vmulh(mask, maskedoff, op1, op2, vl);
2831 }
2832 
2833 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
2834 // CHECK-RV64-NEXT:  entry:
2835 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2836 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2837 //
2838 vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
2839   return vmulh(mask, maskedoff, op1, op2, vl);
2840 }
2841 
2842 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
2843 // CHECK-RV64-NEXT:  entry:
2844 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2845 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2846 //
2847 vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
2848   return vmulh(mask, maskedoff, op1, op2, vl);
2849 }
2850 
2851 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
2852 // CHECK-RV64-NEXT:  entry:
2853 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2854 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2855 //
2856 vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
2857   return vmulh(mask, maskedoff, op1, op2, vl);
2858 }
2859 
2860 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
2861 // CHECK-RV64-NEXT:  entry:
2862 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2863 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2864 //
2865 vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
2866   return vmulh(mask, maskedoff, op1, op2, vl);
2867 }
2868 
2869 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
2870 // CHECK-RV64-NEXT:  entry:
2871 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2872 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2873 //
2874 vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
2875   return vmulh(mask, maskedoff, op1, op2, vl);
2876 }
2877 
2878 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
2879 // CHECK-RV64-NEXT:  entry:
2880 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2881 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2882 //
2883 vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
2884   return vmulh(mask, maskedoff, op1, op2, vl);
2885 }
2886 
2887 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
2888 // CHECK-RV64-NEXT:  entry:
2889 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2890 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2891 //
2892 vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
2893   return vmulh(mask, maskedoff, op1, op2, vl);
2894 }
2895 
2896 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
2897 // CHECK-RV64-NEXT:  entry:
2898 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2899 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2900 //
2901 vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
2902   return vmulh(mask, maskedoff, op1, op2, vl);
2903 }
2904 
2905 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
2906 // CHECK-RV64-NEXT:  entry:
2907 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2908 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2909 //
2910 vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
2911   return vmulh(mask, maskedoff, op1, op2, vl);
2912 }
2913 
2914 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
2915 // CHECK-RV64-NEXT:  entry:
2916 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2917 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2918 //
2919 vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
2920   return vmulh(mask, maskedoff, op1, op2, vl);
2921 }
2922 
2923 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
2924 // CHECK-RV64-NEXT:  entry:
2925 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2926 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2927 //
2928 vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
2929   return vmulh(mask, maskedoff, op1, op2, vl);
2930 }
2931 
2932 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
2933 // CHECK-RV64-NEXT:  entry:
2934 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2935 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2936 //
2937 vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
2938   return vmulh(mask, maskedoff, op1, op2, vl);
2939 }
2940 
2941 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
2942 // CHECK-RV64-NEXT:  entry:
2943 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2944 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2945 //
2946 vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
2947   return vmulh(mask, maskedoff, op1, op2, vl);
2948 }
2949 
2950 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
2951 // CHECK-RV64-NEXT:  entry:
2952 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2953 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2954 //
2955 vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
2956   return vmulh(mask, maskedoff, op1, op2, vl);
2957 }
2958 
2959 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
2960 // CHECK-RV64-NEXT:  entry:
2961 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2962 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2963 //
2964 vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
2965   return vmulh(mask, maskedoff, op1, op2, vl);
2966 }
2967 
2968 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
2969 // CHECK-RV64-NEXT:  entry:
2970 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2971 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2972 //
2973 vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
2974   return vmulh(mask, maskedoff, op1, op2, vl);
2975 }
2976 
2977 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
2978 // CHECK-RV64-NEXT:  entry:
2979 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2980 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2981 //
2982 vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
2983   return vmulh(mask, maskedoff, op1, op2, vl);
2984 }
2985 
2986 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
2987 // CHECK-RV64-NEXT:  entry:
2988 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2989 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2990 //
2991 vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
2992   return vmulh(mask, maskedoff, op1, op2, vl);
2993 }
2994 
2995 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
2996 // CHECK-RV64-NEXT:  entry:
2997 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2998 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
2999 //
3000 vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
3001   return vmulh(mask, maskedoff, op1, op2, vl);
3002 }
3003 
3004 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
3005 // CHECK-RV64-NEXT:  entry:
3006 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3007 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
3008 //
3009 vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
3010   return vmulh(mask, maskedoff, op1, op2, vl);
3011 }
3012 
3013 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
3014 // CHECK-RV64-NEXT:  entry:
3015 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3016 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
3017 //
3018 vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
3019   return vmulh(mask, maskedoff, op1, op2, vl);
3020 }
3021 
3022 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
3023 // CHECK-RV64-NEXT:  entry:
3024 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3025 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
3026 //
3027 vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
3028   return vmulh(mask, maskedoff, op1, op2, vl);
3029 }
3030 
3031 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
3032 // CHECK-RV64-NEXT:  entry:
3033 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3034 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
3035 //
3036 vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
3037   return vmulh(mask, maskedoff, op1, op2, vl);
3038 }
3039 
3040 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
3041 // CHECK-RV64-NEXT:  entry:
3042 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3043 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
3044 //
3045 vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
3046   return vmulh(mask, maskedoff, op1, op2, vl);
3047 }
3048 
3049 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
3050 // CHECK-RV64-NEXT:  entry:
3051 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3052 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
3053 //
3054 vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
3055   return vmulh(mask, maskedoff, op1, op2, vl);
3056 }
3057 
3058 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
3059 // CHECK-RV64-NEXT:  entry:
3060 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3061 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
3062 //
3063 vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
3064   return vmulh(mask, maskedoff, op1, op2, vl);
3065 }
3066 
3067 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
3068 // CHECK-RV64-NEXT:  entry:
3069 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3070 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
3071 //
3072 vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
3073   return vmulh(mask, maskedoff, op1, op2, vl);
3074 }
3075 
3076 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
3077 // CHECK-RV64-NEXT:  entry:
3078 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3079 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
3080 //
3081 vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
3082   return vmulh(mask, maskedoff, op1, op2, vl);
3083 }
3084 
3085 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
3086 // CHECK-RV64-NEXT:  entry:
3087 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3088 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
3089 //
3090 vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
3091   return vmulh(mask, maskedoff, op1, op2, vl);
3092 }
3093 
3094 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
3095 // CHECK-RV64-NEXT:  entry:
3096 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3097 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
3098 //
3099 vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
3100   return vmulh(mask, maskedoff, op1, op2, vl);
3101 }
3102 
3103 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
3104 // CHECK-RV64-NEXT:  entry:
3105 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3106 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
3107 //
3108 vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
3109   return vmulh(mask, maskedoff, op1, op2, vl);
3110 }
3111 
3112 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
3113 // CHECK-RV64-NEXT:  entry:
3114 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3115 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
3116 //
3117 vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
3118   return vmulh(mask, maskedoff, op1, op2, vl);
3119 }
3120 
3121 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
3122 // CHECK-RV64-NEXT:  entry:
3123 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3124 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
3125 //
3126 vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
3127   return vmulh(mask, maskedoff, op1, op2, vl);
3128 }
3129 
3130 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
3131 // CHECK-RV64-NEXT:  entry:
3132 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3133 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
3134 //
3135 vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
3136   return vmulh(mask, maskedoff, op1, op2, vl);
3137 }
3138 
3139 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
3140 // CHECK-RV64-NEXT:  entry:
3141 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3142 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
3143 //
3144 vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
3145   return vmulh(mask, maskedoff, op1, op2, vl);
3146 }
3147 
3148 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
3149 // CHECK-RV64-NEXT:  entry:
3150 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3151 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
3152 //
3153 vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
3154   return vmulh(mask, maskedoff, op1, op2, vl);
3155 }
3156 
3157 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
3158 // CHECK-RV64-NEXT:  entry:
3159 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3160 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
3161 //
3162 vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
3163   return vmulh(mask, maskedoff, op1, op2, vl);
3164 }
3165 
3166 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
3167 // CHECK-RV64-NEXT:  entry:
3168 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3169 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
3170 //
3171 vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
3172   return vmulh(mask, maskedoff, op1, op2, vl);
3173 }
3174 
3175 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
3176 // CHECK-RV64-NEXT:  entry:
3177 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3178 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
3179 //
3180 vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
3181   return vmulhu(mask, maskedoff, op1, op2, vl);
3182 }
3183 
3184 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
3185 // CHECK-RV64-NEXT:  entry:
3186 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3187 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
3188 //
3189 vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
3190   return vmulhu(mask, maskedoff, op1, op2, vl);
3191 }
3192 
3193 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
3194 // CHECK-RV64-NEXT:  entry:
3195 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3196 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
3197 //
3198 vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
3199   return vmulhu(mask, maskedoff, op1, op2, vl);
3200 }
3201 
3202 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
3203 // CHECK-RV64-NEXT:  entry:
3204 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3205 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
3206 //
3207 vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
3208   return vmulhu(mask, maskedoff, op1, op2, vl);
3209 }
3210 
3211 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
3212 // CHECK-RV64-NEXT:  entry:
3213 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3214 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
3215 //
3216 vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
3217   return vmulhu(mask, maskedoff, op1, op2, vl);
3218 }
3219 
3220 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
3221 // CHECK-RV64-NEXT:  entry:
3222 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3223 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
3224 //
3225 vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
3226   return vmulhu(mask, maskedoff, op1, op2, vl);
3227 }
3228 
3229 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
3230 // CHECK-RV64-NEXT:  entry:
3231 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3232 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
3233 //
3234 vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
3235   return vmulhu(mask, maskedoff, op1, op2, vl);
3236 }
3237 
3238 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
3239 // CHECK-RV64-NEXT:  entry:
3240 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3241 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
3242 //
3243 vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
3244   return vmulhu(mask, maskedoff, op1, op2, vl);
3245 }
3246 
3247 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
3248 // CHECK-RV64-NEXT:  entry:
3249 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3250 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
3251 //
3252 vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
3253   return vmulhu(mask, maskedoff, op1, op2, vl);
3254 }
3255 
3256 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
3257 // CHECK-RV64-NEXT:  entry:
3258 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3259 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
3260 //
3261 vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
3262   return vmulhu(mask, maskedoff, op1, op2, vl);
3263 }
3264 
3265 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
3266 // CHECK-RV64-NEXT:  entry:
3267 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3268 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
3269 //
3270 vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
3271   return vmulhu(mask, maskedoff, op1, op2, vl);
3272 }
3273 
3274 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
3275 // CHECK-RV64-NEXT:  entry:
3276 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3277 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
3278 //
3279 vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
3280   return vmulhu(mask, maskedoff, op1, op2, vl);
3281 }
3282 
3283 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
3284 // CHECK-RV64-NEXT:  entry:
3285 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3286 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
3287 //
3288 vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
3289   return vmulhu(mask, maskedoff, op1, op2, vl);
3290 }
3291 
3292 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
3293 // CHECK-RV64-NEXT:  entry:
3294 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3295 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
3296 //
3297 vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
3298   return vmulhu(mask, maskedoff, op1, op2, vl);
3299 }
3300 
3301 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
3302 // CHECK-RV64-NEXT:  entry:
3303 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3304 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
3305 //
3306 vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
3307   return vmulhu(mask, maskedoff, op1, op2, vl);
3308 }
3309 
3310 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
3311 // CHECK-RV64-NEXT:  entry:
3312 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3313 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
3314 //
3315 vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
3316   return vmulhu(mask, maskedoff, op1, op2, vl);
3317 }
3318 
3319 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
3320 // CHECK-RV64-NEXT:  entry:
3321 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3322 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
3323 //
3324 vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
3325   return vmulhu(mask, maskedoff, op1, op2, vl);
3326 }
3327 
3328 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
3329 // CHECK-RV64-NEXT:  entry:
3330 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3331 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
3332 //
3333 vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
3334   return vmulhu(mask, maskedoff, op1, op2, vl);
3335 }
3336 
3337 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
3338 // CHECK-RV64-NEXT:  entry:
3339 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3340 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
3341 //
3342 vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
3343   return vmulhu(mask, maskedoff, op1, op2, vl);
3344 }
3345 
3346 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
3347 // CHECK-RV64-NEXT:  entry:
3348 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3349 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
3350 //
3351 vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
3352   return vmulhu(mask, maskedoff, op1, op2, vl);
3353 }
3354 
3355 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
3356 // CHECK-RV64-NEXT:  entry:
3357 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3358 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
3359 //
3360 vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
3361   return vmulhu(mask, maskedoff, op1, op2, vl);
3362 }
3363 
3364 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
3365 // CHECK-RV64-NEXT:  entry:
3366 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3367 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
3368 //
3369 vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
3370   return vmulhu(mask, maskedoff, op1, op2, vl);
3371 }
3372 
3373 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
3374 // CHECK-RV64-NEXT:  entry:
3375 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3376 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
3377 //
3378 vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
3379   return vmulhu(mask, maskedoff, op1, op2, vl);
3380 }
3381 
3382 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
3383 // CHECK-RV64-NEXT:  entry:
3384 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3385 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
3386 //
3387 vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
3388   return vmulhu(mask, maskedoff, op1, op2, vl);
3389 }
3390 
3391 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
3392 // CHECK-RV64-NEXT:  entry:
3393 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3394 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
3395 //
3396 vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
3397   return vmulhu(mask, maskedoff, op1, op2, vl);
3398 }
3399 
3400 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
3401 // CHECK-RV64-NEXT:  entry:
3402 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3403 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
3404 //
3405 vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
3406   return vmulhu(mask, maskedoff, op1, op2, vl);
3407 }
3408 
3409 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
3410 // CHECK-RV64-NEXT:  entry:
3411 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3412 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
3413 //
3414 vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
3415   return vmulhu(mask, maskedoff, op1, op2, vl);
3416 }
3417 
3418 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
3419 // CHECK-RV64-NEXT:  entry:
3420 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3421 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
3422 //
3423 vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
3424   return vmulhu(mask, maskedoff, op1, op2, vl);
3425 }
3426 
3427 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
3428 // CHECK-RV64-NEXT:  entry:
3429 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3430 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
3431 //
3432 vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
3433   return vmulhu(mask, maskedoff, op1, op2, vl);
3434 }
3435 
3436 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
3437 // CHECK-RV64-NEXT:  entry:
3438 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3439 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
3440 //
3441 vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
3442   return vmulhu(mask, maskedoff, op1, op2, vl);
3443 }
3444 
3445 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
3446 // CHECK-RV64-NEXT:  entry:
3447 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3448 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
3449 //
3450 vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
3451   return vmulhu(mask, maskedoff, op1, op2, vl);
3452 }
3453 
3454 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
3455 // CHECK-RV64-NEXT:  entry:
3456 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3457 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
3458 //
3459 vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
3460   return vmulhu(mask, maskedoff, op1, op2, vl);
3461 }
3462 
3463 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
3464 // CHECK-RV64-NEXT:  entry:
3465 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3466 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
3467 //
3468 vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
3469   return vmulhu(mask, maskedoff, op1, op2, vl);
3470 }
3471 
3472 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
3473 // CHECK-RV64-NEXT:  entry:
3474 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3475 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
3476 //
3477 vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
3478   return vmulhu(mask, maskedoff, op1, op2, vl);
3479 }
3480 
3481 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
3482 // CHECK-RV64-NEXT:  entry:
3483 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3484 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
3485 //
3486 vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
3487   return vmulhu(mask, maskedoff, op1, op2, vl);
3488 }
3489 
3490 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
3491 // CHECK-RV64-NEXT:  entry:
3492 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3493 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
3494 //
3495 vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
3496   return vmulhu(mask, maskedoff, op1, op2, vl);
3497 }

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) {
  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4267 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
4268 //
test_vmul_vx_i32m4_mt(vbool8_t mask,vint32m4_t maskedoff,vint32m4_t op1,int32_t op2,size_t vl,size_t ta)4269 vint32m4_t test_vmul_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) {
4270   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4271 }
4272 
4273 // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_mt(
4274 // CHECK-RV64-NEXT:  entry:
4275 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4276 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
4277 //
test_vmul_vv_i32m8_mt(vbool4_t mask,vint32m8_t maskedoff,vint32m8_t op1,vint32m8_t op2,size_t vl,size_t ta)4278 vint32m8_t test_vmul_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) {
4279   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4280 }
4281 
4282 // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_mt(
4283 // CHECK-RV64-NEXT:  entry:
4284 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4285 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
4286 //
test_vmul_vx_i32m8_mt(vbool4_t mask,vint32m8_t maskedoff,vint32m8_t op1,int32_t op2,size_t vl,size_t ta)4287 vint32m8_t test_vmul_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) {
4288   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4289 }
4290 
4291 // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_mt(
4292 // CHECK-RV64-NEXT:  entry:
4293 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4294 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
4295 //
test_vmul_vv_i64m1_mt(vbool64_t mask,vint64m1_t maskedoff,vint64m1_t op1,vint64m1_t op2,size_t vl,size_t ta)4296 vint64m1_t test_vmul_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) {
4297   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4298 }
4299 
4300 // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_mt(
4301 // CHECK-RV64-NEXT:  entry:
4302 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4303 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
4304 //
test_vmul_vx_i64m1_mt(vbool64_t mask,vint64m1_t maskedoff,vint64m1_t op1,int64_t op2,size_t vl,size_t ta)4305 vint64m1_t test_vmul_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) {
4306   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4307 }
4308 
4309 // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_mt(
4310 // CHECK-RV64-NEXT:  entry:
4311 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4312 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
4313 //
test_vmul_vv_i64m2_mt(vbool32_t mask,vint64m2_t maskedoff,vint64m2_t op1,vint64m2_t op2,size_t vl,size_t ta)4314 vint64m2_t test_vmul_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) {
4315   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4316 }
4317 
4318 // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_mt(
4319 // CHECK-RV64-NEXT:  entry:
4320 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4321 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
4322 //
test_vmul_vx_i64m2_mt(vbool32_t mask,vint64m2_t maskedoff,vint64m2_t op1,int64_t op2,size_t vl,size_t ta)4323 vint64m2_t test_vmul_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) {
4324   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4325 }
4326 
4327 // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_mt(
4328 // CHECK-RV64-NEXT:  entry:
4329 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4330 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
4331 //
test_vmul_vv_i64m4_mt(vbool16_t mask,vint64m4_t maskedoff,vint64m4_t op1,vint64m4_t op2,size_t vl,size_t ta)4332 vint64m4_t test_vmul_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) {
4333   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4334 }
4335 
4336 // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_mt(
4337 // CHECK-RV64-NEXT:  entry:
4338 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4339 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
4340 //
test_vmul_vx_i64m4_mt(vbool16_t mask,vint64m4_t maskedoff,vint64m4_t op1,int64_t op2,size_t vl,size_t ta)4341 vint64m4_t test_vmul_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) {
4342   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4343 }
4344 
4345 // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_mt(
4346 // CHECK-RV64-NEXT:  entry:
4347 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4348 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
4349 //
test_vmul_vv_i64m8_mt(vbool8_t mask,vint64m8_t maskedoff,vint64m8_t op1,vint64m8_t op2,size_t vl,size_t ta)4350 vint64m8_t test_vmul_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) {
4351   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4352 }
4353 
4354 // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_mt(
4355 // CHECK-RV64-NEXT:  entry:
4356 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4357 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
4358 //
test_vmul_vx_i64m8_mt(vbool8_t mask,vint64m8_t maskedoff,vint64m8_t op1,int64_t op2,size_t vl,size_t ta)4359 vint64m8_t test_vmul_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) {
4360   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4361 }
4362 
4363 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_mt(
4364 // CHECK-RV64-NEXT:  entry:
4365 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4366 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
4367 //
test_vmul_vv_u8mf8_mt(vbool64_t mask,vuint8mf8_t maskedoff,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl,size_t ta)4368 vuint8mf8_t test_vmul_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) {
4369   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4370 }
4371 
4372 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_mt(
4373 // CHECK-RV64-NEXT:  entry:
4374 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4375 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
4376 //
test_vmul_vx_u8mf8_mt(vbool64_t mask,vuint8mf8_t maskedoff,vuint8mf8_t op1,uint8_t op2,size_t vl,size_t ta)4377 vuint8mf8_t test_vmul_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) {
4378   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4379 }
4380 
4381 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_mt(
4382 // CHECK-RV64-NEXT:  entry:
4383 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4384 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
4385 //
test_vmul_vv_u8mf4_mt(vbool32_t mask,vuint8mf4_t maskedoff,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl,size_t ta)4386 vuint8mf4_t test_vmul_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) {
4387   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4388 }
4389 
4390 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_mt(
4391 // CHECK-RV64-NEXT:  entry:
4392 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4393 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
4394 //
test_vmul_vx_u8mf4_mt(vbool32_t mask,vuint8mf4_t maskedoff,vuint8mf4_t op1,uint8_t op2,size_t vl,size_t ta)4395 vuint8mf4_t test_vmul_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) {
4396   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4397 }
4398 
4399 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_mt(
4400 // CHECK-RV64-NEXT:  entry:
4401 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4402 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
4403 //
test_vmul_vv_u8mf2_mt(vbool16_t mask,vuint8mf2_t maskedoff,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl,size_t ta)4404 vuint8mf2_t test_vmul_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) {
4405   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4406 }
4407 
4408 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_mt(
4409 // CHECK-RV64-NEXT:  entry:
4410 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4411 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
4412 //
test_vmul_vx_u8mf2_mt(vbool16_t mask,vuint8mf2_t maskedoff,vuint8mf2_t op1,uint8_t op2,size_t vl,size_t ta)4413 vuint8mf2_t test_vmul_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) {
4414   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4415 }
4416 
4417 // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_mt(
4418 // CHECK-RV64-NEXT:  entry:
4419 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4420 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
4421 //
test_vmul_vv_u8m1_mt(vbool8_t mask,vuint8m1_t maskedoff,vuint8m1_t op1,vuint8m1_t op2,size_t vl,size_t ta)4422 vuint8m1_t test_vmul_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) {
4423   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4424 }
4425 
4426 // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_mt(
4427 // CHECK-RV64-NEXT:  entry:
4428 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4429 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
4430 //
test_vmul_vx_u8m1_mt(vbool8_t mask,vuint8m1_t maskedoff,vuint8m1_t op1,uint8_t op2,size_t vl,size_t ta)4431 vuint8m1_t test_vmul_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) {
4432   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4433 }
4434 
4435 // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_mt(
4436 // CHECK-RV64-NEXT:  entry:
4437 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4438 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
4439 //
test_vmul_vv_u8m2_mt(vbool4_t mask,vuint8m2_t maskedoff,vuint8m2_t op1,vuint8m2_t op2,size_t vl,size_t ta)4440 vuint8m2_t test_vmul_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) {
4441   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4442 }
4443 
4444 // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_mt(
4445 // CHECK-RV64-NEXT:  entry:
4446 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4447 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
4448 //
test_vmul_vx_u8m2_mt(vbool4_t mask,vuint8m2_t maskedoff,vuint8m2_t op1,uint8_t op2,size_t vl,size_t ta)4449 vuint8m2_t test_vmul_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) {
4450   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4451 }
4452 
4453 // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_mt(
4454 // CHECK-RV64-NEXT:  entry:
4455 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4456 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
4457 //
test_vmul_vv_u8m4_mt(vbool2_t mask,vuint8m4_t maskedoff,vuint8m4_t op1,vuint8m4_t op2,size_t vl,size_t ta)4458 vuint8m4_t test_vmul_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) {
4459   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4460 }
4461 
4462 // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_mt(
4463 // CHECK-RV64-NEXT:  entry:
4464 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4465 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
4466 //
test_vmul_vx_u8m4_mt(vbool2_t mask,vuint8m4_t maskedoff,vuint8m4_t op1,uint8_t op2,size_t vl,size_t ta)4467 vuint8m4_t test_vmul_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) {
4468   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4469 }
4470 
4471 // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_mt(
4472 // CHECK-RV64-NEXT:  entry:
4473 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4474 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
4475 //
test_vmul_vv_u8m8_mt(vbool1_t mask,vuint8m8_t maskedoff,vuint8m8_t op1,vuint8m8_t op2,size_t vl,size_t ta)4476 vuint8m8_t test_vmul_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) {
4477   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4478 }
4479 
4480 // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_mt(
4481 // CHECK-RV64-NEXT:  entry:
4482 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4483 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
4484 //
test_vmul_vx_u8m8_mt(vbool1_t mask,vuint8m8_t maskedoff,vuint8m8_t op1,uint8_t op2,size_t vl,size_t ta)4485 vuint8m8_t test_vmul_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) {
4486   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4487 }
4488 
4489 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_mt(
4490 // CHECK-RV64-NEXT:  entry:
4491 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4492 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
4493 //
test_vmul_vv_u16mf4_mt(vbool64_t mask,vuint16mf4_t maskedoff,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl,size_t ta)4494 vuint16mf4_t test_vmul_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) {
4495   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4496 }
4497 
4498 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_mt(
4499 // CHECK-RV64-NEXT:  entry:
4500 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4501 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
4502 //
test_vmul_vx_u16mf4_mt(vbool64_t mask,vuint16mf4_t maskedoff,vuint16mf4_t op1,uint16_t op2,size_t vl,size_t ta)4503 vuint16mf4_t test_vmul_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) {
4504   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4505 }
4506 
4507 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_mt(
4508 // CHECK-RV64-NEXT:  entry:
4509 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4510 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
4511 //
test_vmul_vv_u16mf2_mt(vbool32_t mask,vuint16mf2_t maskedoff,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl,size_t ta)4512 vuint16mf2_t test_vmul_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) {
4513   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4514 }
4515 
4516 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_mt(
4517 // CHECK-RV64-NEXT:  entry:
4518 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4519 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
4520 //
test_vmul_vx_u16mf2_mt(vbool32_t mask,vuint16mf2_t maskedoff,vuint16mf2_t op1,uint16_t op2,size_t vl,size_t ta)4521 vuint16mf2_t test_vmul_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) {
4522   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4523 }
4524 
4525 // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_mt(
4526 // CHECK-RV64-NEXT:  entry:
4527 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4528 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
4529 //
test_vmul_vv_u16m1_mt(vbool16_t mask,vuint16m1_t maskedoff,vuint16m1_t op1,vuint16m1_t op2,size_t vl,size_t ta)4530 vuint16m1_t test_vmul_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) {
4531   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4532 }
4533 
4534 // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_mt(
4535 // CHECK-RV64-NEXT:  entry:
4536 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4537 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
4538 //
test_vmul_vx_u16m1_mt(vbool16_t mask,vuint16m1_t maskedoff,vuint16m1_t op1,uint16_t op2,size_t vl,size_t ta)4539 vuint16m1_t test_vmul_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) {
4540   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4541 }
4542 
4543 // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_mt(
4544 // CHECK-RV64-NEXT:  entry:
4545 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4546 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
4547 //
test_vmul_vv_u16m2_mt(vbool8_t mask,vuint16m2_t maskedoff,vuint16m2_t op1,vuint16m2_t op2,size_t vl,size_t ta)4548 vuint16m2_t test_vmul_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) {
4549   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4550 }
4551 
4552 // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_mt(
4553 // CHECK-RV64-NEXT:  entry:
4554 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4555 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
4556 //
test_vmul_vx_u16m2_mt(vbool8_t mask,vuint16m2_t maskedoff,vuint16m2_t op1,uint16_t op2,size_t vl,size_t ta)4557 vuint16m2_t test_vmul_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) {
4558   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4559 }
4560 
4561 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_mt(
4562 // CHECK-RV64-NEXT:  entry:
4563 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4564 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
4565 //
test_vmul_vv_u16m4_mt(vbool4_t mask,vuint16m4_t maskedoff,vuint16m4_t op1,vuint16m4_t op2,size_t vl,size_t ta)4566 vuint16m4_t test_vmul_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) {
4567   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4568 }
4569 
4570 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_mt(
4571 // CHECK-RV64-NEXT:  entry:
4572 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4573 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
4574 //
test_vmul_vx_u16m4_mt(vbool4_t mask,vuint16m4_t maskedoff,vuint16m4_t op1,uint16_t op2,size_t vl,size_t ta)4575 vuint16m4_t test_vmul_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) {
4576   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4577 }
4578 
4579 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_mt(
4580 // CHECK-RV64-NEXT:  entry:
4581 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4582 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
4583 //
test_vmul_vv_u16m8_mt(vbool2_t mask,vuint16m8_t maskedoff,vuint16m8_t op1,vuint16m8_t op2,size_t vl,size_t ta)4584 vuint16m8_t test_vmul_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) {
4585   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4586 }
4587 
4588 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_mt(
4589 // CHECK-RV64-NEXT:  entry:
4590 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4591 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
4592 //
test_vmul_vx_u16m8_mt(vbool2_t mask,vuint16m8_t maskedoff,vuint16m8_t op1,uint16_t op2,size_t vl,size_t ta)4593 vuint16m8_t test_vmul_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) {
4594   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4595 }
4596 
4597 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_mt(
4598 // CHECK-RV64-NEXT:  entry:
4599 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4600 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
4601 //
test_vmul_vv_u32mf2_mt(vbool64_t mask,vuint32mf2_t maskedoff,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl,size_t ta)4602 vuint32mf2_t test_vmul_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) {
4603   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4604 }
4605 
4606 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_mt(
4607 // CHECK-RV64-NEXT:  entry:
4608 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4609 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
4610 //
test_vmul_vx_u32mf2_mt(vbool64_t mask,vuint32mf2_t maskedoff,vuint32mf2_t op1,uint32_t op2,size_t vl,size_t ta)4611 vuint32mf2_t test_vmul_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) {
4612   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4613 }
4614 
4615 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_mt(
4616 // CHECK-RV64-NEXT:  entry:
4617 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4618 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
4619 //
test_vmul_vv_u32m1_mt(vbool32_t mask,vuint32m1_t maskedoff,vuint32m1_t op1,vuint32m1_t op2,size_t vl,size_t ta)4620 vuint32m1_t test_vmul_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) {
4621   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4622 }
4623 
4624 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_mt(
4625 // CHECK-RV64-NEXT:  entry:
4626 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4627 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
4628 //
test_vmul_vx_u32m1_mt(vbool32_t mask,vuint32m1_t maskedoff,vuint32m1_t op1,uint32_t op2,size_t vl,size_t ta)4629 vuint32m1_t test_vmul_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) {
4630   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4631 }
4632 
4633 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_mt(
4634 // CHECK-RV64-NEXT:  entry:
4635 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4636 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
4637 //
test_vmul_vv_u32m2_mt(vbool16_t mask,vuint32m2_t maskedoff,vuint32m2_t op1,vuint32m2_t op2,size_t vl,size_t ta)4638 vuint32m2_t test_vmul_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) {
4639   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4640 }
4641 
4642 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_mt(
4643 // CHECK-RV64-NEXT:  entry:
4644 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4645 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
4646 //
test_vmul_vx_u32m2_mt(vbool16_t mask,vuint32m2_t maskedoff,vuint32m2_t op1,uint32_t op2,size_t vl,size_t ta)4647 vuint32m2_t test_vmul_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) {
4648   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4649 }
4650 
4651 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_mt(
4652 // CHECK-RV64-NEXT:  entry:
4653 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4654 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
4655 //
test_vmul_vv_u32m4_mt(vbool8_t mask,vuint32m4_t maskedoff,vuint32m4_t op1,vuint32m4_t op2,size_t vl,size_t ta)4656 vuint32m4_t test_vmul_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
4657   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4658 }
4659 
4660 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_mt(
4661 // CHECK-RV64-NEXT:  entry:
4662 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4663 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
4664 //
test_vmul_vx_u32m4_mt(vbool8_t mask,vuint32m4_t maskedoff,vuint32m4_t op1,uint32_t op2,size_t vl,size_t ta)4665 vuint32m4_t test_vmul_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) {
4666   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4667 }
4668 
4669 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_mt(
4670 // CHECK-RV64-NEXT:  entry:
4671 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4672 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
4673 //
test_vmul_vv_u32m8_mt(vbool4_t mask,vuint32m8_t maskedoff,vuint32m8_t op1,vuint32m8_t op2,size_t vl,size_t ta)4674 vuint32m8_t test_vmul_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) {
4675   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4676 }
4677 
4678 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_mt(
4679 // CHECK-RV64-NEXT:  entry:
4680 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4681 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
4682 //
test_vmul_vx_u32m8_mt(vbool4_t mask,vuint32m8_t maskedoff,vuint32m8_t op1,uint32_t op2,size_t vl,size_t ta)4683 vuint32m8_t test_vmul_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) {
4684   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4685 }
4686 
4687 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_mt(
4688 // CHECK-RV64-NEXT:  entry:
4689 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4690 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
4691 //
test_vmul_vv_u64m1_mt(vbool64_t mask,vuint64m1_t maskedoff,vuint64m1_t op1,vuint64m1_t op2,size_t vl,size_t ta)4692 vuint64m1_t test_vmul_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) {
4693   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4694 }
4695 
4696 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_mt(
4697 // CHECK-RV64-NEXT:  entry:
4698 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4699 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
4700 //
test_vmul_vx_u64m1_mt(vbool64_t mask,vuint64m1_t maskedoff,vuint64m1_t op1,uint64_t op2,size_t vl,size_t ta)4701 vuint64m1_t test_vmul_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) {
4702   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4703 }
4704 
4705 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_mt(
4706 // CHECK-RV64-NEXT:  entry:
4707 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4708 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
4709 //
test_vmul_vv_u64m2_mt(vbool32_t mask,vuint64m2_t maskedoff,vuint64m2_t op1,vuint64m2_t op2,size_t vl,size_t ta)4710 vuint64m2_t test_vmul_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) {
4711   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4712 }
4713 
4714 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_mt(
4715 // CHECK-RV64-NEXT:  entry:
4716 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4717 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
4718 //
test_vmul_vx_u64m2_mt(vbool32_t mask,vuint64m2_t maskedoff,vuint64m2_t op1,uint64_t op2,size_t vl,size_t ta)4719 vuint64m2_t test_vmul_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) {
4720   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4721 }
4722 
4723 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_mt(
4724 // CHECK-RV64-NEXT:  entry:
4725 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4726 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
4727 //
test_vmul_vv_u64m4_mt(vbool16_t mask,vuint64m4_t maskedoff,vuint64m4_t op1,vuint64m4_t op2,size_t vl,size_t ta)4728 vuint64m4_t test_vmul_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) {
4729   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4730 }
4731 
4732 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_mt(
4733 // CHECK-RV64-NEXT:  entry:
4734 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4735 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
4736 //
test_vmul_vx_u64m4_mt(vbool16_t mask,vuint64m4_t maskedoff,vuint64m4_t op1,uint64_t op2,size_t vl,size_t ta)4737 vuint64m4_t test_vmul_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) {
4738   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4739 }
4740 
4741 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_mt(
4742 // CHECK-RV64-NEXT:  entry:
4743 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4744 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
4745 //
test_vmul_vv_u64m8_mt(vbool8_t mask,vuint64m8_t maskedoff,vuint64m8_t op1,vuint64m8_t op2,size_t vl,size_t ta)4746 vuint64m8_t test_vmul_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) {
4747   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4748 }
4749 
4750 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_mt(
4751 // CHECK-RV64-NEXT:  entry:
4752 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4753 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
4754 //
test_vmul_vx_u64m8_mt(vbool8_t mask,vuint64m8_t maskedoff,vuint64m8_t op1,uint64_t op2,size_t vl,size_t ta)4755 vuint64m8_t test_vmul_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) {
4756   return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4757 }
4758 
4759 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_mt(
4760 // CHECK-RV64-NEXT:  entry:
4761 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4762 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
4763 //
test_vmulh_vv_i8mf8_mt(vbool64_t mask,vint8mf8_t maskedoff,vint8mf8_t op1,vint8mf8_t op2,size_t vl,size_t ta)4764 vint8mf8_t test_vmulh_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) {
4765   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4766 }
4767 
4768 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_mt(
4769 // CHECK-RV64-NEXT:  entry:
4770 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4771 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
4772 //
test_vmulh_vx_i8mf8_mt(vbool64_t mask,vint8mf8_t maskedoff,vint8mf8_t op1,int8_t op2,size_t vl,size_t ta)4773 vint8mf8_t test_vmulh_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) {
4774   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4775 }
4776 
4777 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_mt(
4778 // CHECK-RV64-NEXT:  entry:
4779 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4780 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
4781 //
test_vmulh_vv_i8mf4_mt(vbool32_t mask,vint8mf4_t maskedoff,vint8mf4_t op1,vint8mf4_t op2,size_t vl,size_t ta)4782 vint8mf4_t test_vmulh_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) {
4783   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4784 }
4785 
4786 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_mt(
4787 // CHECK-RV64-NEXT:  entry:
4788 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4789 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
4790 //
test_vmulh_vx_i8mf4_mt(vbool32_t mask,vint8mf4_t maskedoff,vint8mf4_t op1,int8_t op2,size_t vl,size_t ta)4791 vint8mf4_t test_vmulh_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) {
4792   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4793 }
4794 
4795 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_mt(
4796 // CHECK-RV64-NEXT:  entry:
4797 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4798 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
4799 //
test_vmulh_vv_i8mf2_mt(vbool16_t mask,vint8mf2_t maskedoff,vint8mf2_t op1,vint8mf2_t op2,size_t vl,size_t ta)4800 vint8mf2_t test_vmulh_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) {
4801   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4802 }
4803 
4804 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_mt(
4805 // CHECK-RV64-NEXT:  entry:
4806 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4807 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
4808 //
test_vmulh_vx_i8mf2_mt(vbool16_t mask,vint8mf2_t maskedoff,vint8mf2_t op1,int8_t op2,size_t vl,size_t ta)4809 vint8mf2_t test_vmulh_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) {
4810   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4811 }
4812 
4813 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_mt(
4814 // CHECK-RV64-NEXT:  entry:
4815 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4816 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
4817 //
test_vmulh_vv_i8m1_mt(vbool8_t mask,vint8m1_t maskedoff,vint8m1_t op1,vint8m1_t op2,size_t vl,size_t ta)4818 vint8m1_t test_vmulh_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) {
4819   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4820 }
4821 
4822 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_mt(
4823 // CHECK-RV64-NEXT:  entry:
4824 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4825 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
4826 //
test_vmulh_vx_i8m1_mt(vbool8_t mask,vint8m1_t maskedoff,vint8m1_t op1,int8_t op2,size_t vl,size_t ta)4827 vint8m1_t test_vmulh_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) {
4828   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4829 }
4830 
4831 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_mt(
4832 // CHECK-RV64-NEXT:  entry:
4833 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4834 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
4835 //
test_vmulh_vv_i8m2_mt(vbool4_t mask,vint8m2_t maskedoff,vint8m2_t op1,vint8m2_t op2,size_t vl,size_t ta)4836 vint8m2_t test_vmulh_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) {
4837   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4838 }
4839 
4840 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_mt(
4841 // CHECK-RV64-NEXT:  entry:
4842 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4843 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
4844 //
test_vmulh_vx_i8m2_mt(vbool4_t mask,vint8m2_t maskedoff,vint8m2_t op1,int8_t op2,size_t vl,size_t ta)4845 vint8m2_t test_vmulh_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) {
4846   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4847 }
4848 
4849 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_mt(
4850 // CHECK-RV64-NEXT:  entry:
4851 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4852 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
4853 //
test_vmulh_vv_i8m4_mt(vbool2_t mask,vint8m4_t maskedoff,vint8m4_t op1,vint8m4_t op2,size_t vl,size_t ta)4854 vint8m4_t test_vmulh_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) {
4855   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4856 }
4857 
4858 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_mt(
4859 // CHECK-RV64-NEXT:  entry:
4860 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4861 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
4862 //
test_vmulh_vx_i8m4_mt(vbool2_t mask,vint8m4_t maskedoff,vint8m4_t op1,int8_t op2,size_t vl,size_t ta)4863 vint8m4_t test_vmulh_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) {
4864   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4865 }
4866 
4867 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_mt(
4868 // CHECK-RV64-NEXT:  entry:
4869 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4870 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
4871 //
test_vmulh_vv_i8m8_mt(vbool1_t mask,vint8m8_t maskedoff,vint8m8_t op1,vint8m8_t op2,size_t vl,size_t ta)4872 vint8m8_t test_vmulh_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) {
4873   return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
4874 }
4875 
4876 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_mt(
4877 // CHECK-RV64-NEXT:  entry:
4878 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
4879 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
4880 //
vint8m8_t test_vmulh_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulh_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulh_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulh_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulh_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulh_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulh_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulh_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulh_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) {
  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

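// NOTE (added comment, not part of the autogenerated assertions): the tests
// below exercise the masked, tail-policy (_mt) overloads of vmulhu, which
// return the high half of an unsigned*unsigned product; passing
// VE_TAIL_AGNOSTIC shows up as the trailing policy operand (i64 1) in the IR.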
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmulhu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmulhu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmulhu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

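// NOTE (added comment, not part of the autogenerated assertions): the tests
// below cover the masked, tail-policy (_mt) overloads of vmulhsu, which
// multiply a signed op1 by an unsigned op2 and return the high half of the
// product.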
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmulhsu_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) {
5665   return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
5666 }
5667 
5668 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_mt(
5669 // CHECK-RV64-NEXT:  entry:
5670 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
5671 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
5672 //
test_vmulhsu_vx_i8m8_mt(vbool1_t mask,vint8m8_t maskedoff,vint8m8_t op1,uint8_t op2,size_t vl,size_t ta)5673 vint8m8_t test_vmulhsu_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl, size_t ta) {
5674   return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
5675 }
5676 
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmulhsu_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmulhsu_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}