; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i8> @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i8> @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i8> @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i8> @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32);

define <vscale x 64 x i8> @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    i32 %4)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i16> @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i16> @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i16> @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i16> @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i16> @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i32);

define <vscale x 32 x i16> @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i32> @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i32> @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32);

define <vscale x 16 x i32> @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i64> @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i64> @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i64> @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32);

define <vscale x 8 x i64> @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vmulh.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i32);

; Masked vector-vector test at LMUL=8: with three m8 vector arguments plus a
; mask, not all operands fit in the v8/v16 argument register groups, so the
; expected codegen loads the third operand from memory via vl8re64.v (a0 holds
; its address, a1 the VL) before the masked vmulh.vv. NOTE: CHECK lines are
; autogenerated by update_llc_test_checks.py — do not edit them by hand.
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}
975
976declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
977  <vscale x 1 x i8>,
978  i8,
979  i32);
980
; Unmasked vector-scalar test: %0 is the vector operand, %1 the scalar (passed
; in a0), %2 the VL (passed in a1). Expected codegen selects the .vx form of
; vmulh rather than splatting the scalar. NOTE: CHECK lines are autogenerated
; by update_llc_test_checks.py — do not edit them by hand.
define <vscale x 1 x i8> @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}
995
996declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
997  <vscale x 1 x i8>,
998  <vscale x 1 x i8>,
999  i8,
1000  <vscale x 1 x i1>,
1001  i32);
1002
; Masked vector-scalar test: %0 is the merge (passthru) operand, %1 the vector
; multiplicand, %2 the scalar (a0), %3 the mask, %4 the VL (a1). Expected
; codegen is vmulh.vx with v0.t under a tail-undisturbed (tu) vsetvli.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand.
define <vscale x 1 x i8> @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i8> %a
}
1019
1020declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
1021  <vscale x 2 x i8>,
1022  i8,
1023  i32);
1024
1025define <vscale x 2 x i8> @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
1026; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8:
1027; CHECK:       # %bb.0: # %entry
1028; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1029; CHECK-NEXT:    vmulh.vx v8, v8, a0
1030; CHECK-NEXT:    ret
1031entry:
1032  %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
1033    <vscale x 2 x i8> %0,
1034    i8 %1,
1035    i32 %2)
1036
1037  ret <vscale x 2 x i8> %a
1038}
1039
1040declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
1041  <vscale x 2 x i8>,
1042  <vscale x 2 x i8>,
1043  i8,
1044  <vscale x 2 x i1>,
1045  i32);
1046
1047define <vscale x 2 x i8> @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1048; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8:
1049; CHECK:       # %bb.0: # %entry
1050; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
1051; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1052; CHECK-NEXT:    ret
1053entry:
1054  %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
1055    <vscale x 2 x i8> %0,
1056    <vscale x 2 x i8> %1,
1057    i8 %2,
1058    <vscale x 2 x i1> %3,
1059    i32 %4)
1060
1061  ret <vscale x 2 x i8> %a
1062}
1063
1064declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
1065  <vscale x 4 x i8>,
1066  i8,
1067  i32);
1068
1069define <vscale x 4 x i8> @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
1070; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8:
1071; CHECK:       # %bb.0: # %entry
1072; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1073; CHECK-NEXT:    vmulh.vx v8, v8, a0
1074; CHECK-NEXT:    ret
1075entry:
1076  %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
1077    <vscale x 4 x i8> %0,
1078    i8 %1,
1079    i32 %2)
1080
1081  ret <vscale x 4 x i8> %a
1082}
1083
1084declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
1085  <vscale x 4 x i8>,
1086  <vscale x 4 x i8>,
1087  i8,
1088  <vscale x 4 x i1>,
1089  i32);
1090
1091define <vscale x 4 x i8> @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1092; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8:
1093; CHECK:       # %bb.0: # %entry
1094; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
1095; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1096; CHECK-NEXT:    ret
1097entry:
1098  %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
1099    <vscale x 4 x i8> %0,
1100    <vscale x 4 x i8> %1,
1101    i8 %2,
1102    <vscale x 4 x i1> %3,
1103    i32 %4)
1104
1105  ret <vscale x 4 x i8> %a
1106}
1107
1108declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
1109  <vscale x 8 x i8>,
1110  i8,
1111  i32);
1112
1113define <vscale x 8 x i8> @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
1114; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8:
1115; CHECK:       # %bb.0: # %entry
1116; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1117; CHECK-NEXT:    vmulh.vx v8, v8, a0
1118; CHECK-NEXT:    ret
1119entry:
1120  %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
1121    <vscale x 8 x i8> %0,
1122    i8 %1,
1123    i32 %2)
1124
1125  ret <vscale x 8 x i8> %a
1126}
1127
1128declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
1129  <vscale x 8 x i8>,
1130  <vscale x 8 x i8>,
1131  i8,
1132  <vscale x 8 x i1>,
1133  i32);
1134
1135define <vscale x 8 x i8> @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1136; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8:
1137; CHECK:       # %bb.0: # %entry
1138; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
1139; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1140; CHECK-NEXT:    ret
1141entry:
1142  %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
1143    <vscale x 8 x i8> %0,
1144    <vscale x 8 x i8> %1,
1145    i8 %2,
1146    <vscale x 8 x i1> %3,
1147    i32 %4)
1148
1149  ret <vscale x 8 x i8> %a
1150}
1151
1152declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
1153  <vscale x 16 x i8>,
1154  i8,
1155  i32);
1156
1157define <vscale x 16 x i8> @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
1158; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8:
1159; CHECK:       # %bb.0: # %entry
1160; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1161; CHECK-NEXT:    vmulh.vx v8, v8, a0
1162; CHECK-NEXT:    ret
1163entry:
1164  %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
1165    <vscale x 16 x i8> %0,
1166    i8 %1,
1167    i32 %2)
1168
1169  ret <vscale x 16 x i8> %a
1170}
1171
1172declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
1173  <vscale x 16 x i8>,
1174  <vscale x 16 x i8>,
1175  i8,
1176  <vscale x 16 x i1>,
1177  i32);
1178
1179define <vscale x 16 x i8> @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1180; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8:
1181; CHECK:       # %bb.0: # %entry
1182; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
1183; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
1184; CHECK-NEXT:    ret
1185entry:
1186  %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
1187    <vscale x 16 x i8> %0,
1188    <vscale x 16 x i8> %1,
1189    i8 %2,
1190    <vscale x 16 x i1> %3,
1191    i32 %4)
1192
1193  ret <vscale x 16 x i8> %a
1194}
1195
1196declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
1197  <vscale x 32 x i8>,
1198  i8,
1199  i32);
1200
1201define <vscale x 32 x i8> @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
1202; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8:
1203; CHECK:       # %bb.0: # %entry
1204; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
1205; CHECK-NEXT:    vmulh.vx v8, v8, a0
1206; CHECK-NEXT:    ret
1207entry:
1208  %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
1209    <vscale x 32 x i8> %0,
1210    i8 %1,
1211    i32 %2)
1212
1213  ret <vscale x 32 x i8> %a
1214}
1215
1216declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
1217  <vscale x 32 x i8>,
1218  <vscale x 32 x i8>,
1219  i8,
1220  <vscale x 32 x i1>,
1221  i32);
1222
1223define <vscale x 32 x i8> @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
1224; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8:
1225; CHECK:       # %bb.0: # %entry
1226; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
1227; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
1228; CHECK-NEXT:    ret
1229entry:
1230  %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
1231    <vscale x 32 x i8> %0,
1232    <vscale x 32 x i8> %1,
1233    i8 %2,
1234    <vscale x 32 x i1> %3,
1235    i32 %4)
1236
1237  ret <vscale x 32 x i8> %a
1238}
1239
1240declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
1241  <vscale x 64 x i8>,
1242  i8,
1243  i32);
1244
1245define <vscale x 64 x i8> @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
1246; CHECK-LABEL: intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8:
1247; CHECK:       # %bb.0: # %entry
1248; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
1249; CHECK-NEXT:    vmulh.vx v8, v8, a0
1250; CHECK-NEXT:    ret
1251entry:
1252  %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
1253    <vscale x 64 x i8> %0,
1254    i8 %1,
1255    i32 %2)
1256
1257  ret <vscale x 64 x i8> %a
1258}
1259
1260declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
1261  <vscale x 64 x i8>,
1262  <vscale x 64 x i8>,
1263  i8,
1264  <vscale x 64 x i1>,
1265  i32);
1266
1267define <vscale x 64 x i8> @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
1268; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8:
1269; CHECK:       # %bb.0: # %entry
1270; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
1271; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
1272; CHECK-NEXT:    ret
1273entry:
1274  %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
1275    <vscale x 64 x i8> %0,
1276    <vscale x 64 x i8> %1,
1277    i8 %2,
1278    <vscale x 64 x i1> %3,
1279    i32 %4)
1280
1281  ret <vscale x 64 x i8> %a
1282}
1283
1284declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
1285  <vscale x 1 x i16>,
1286  i16,
1287  i32);
1288
1289define <vscale x 1 x i16> @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
1290; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16:
1291; CHECK:       # %bb.0: # %entry
1292; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1293; CHECK-NEXT:    vmulh.vx v8, v8, a0
1294; CHECK-NEXT:    ret
1295entry:
1296  %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
1297    <vscale x 1 x i16> %0,
1298    i16 %1,
1299    i32 %2)
1300
1301  ret <vscale x 1 x i16> %a
1302}
1303
1304declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
1305  <vscale x 1 x i16>,
1306  <vscale x 1 x i16>,
1307  i16,
1308  <vscale x 1 x i1>,
1309  i32);
1310
1311define <vscale x 1 x i16> @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1312; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16:
1313; CHECK:       # %bb.0: # %entry
1314; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
1315; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1316; CHECK-NEXT:    ret
1317entry:
1318  %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
1319    <vscale x 1 x i16> %0,
1320    <vscale x 1 x i16> %1,
1321    i16 %2,
1322    <vscale x 1 x i1> %3,
1323    i32 %4)
1324
1325  ret <vscale x 1 x i16> %a
1326}
1327
1328declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
1329  <vscale x 2 x i16>,
1330  i16,
1331  i32);
1332
1333define <vscale x 2 x i16> @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
1334; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16:
1335; CHECK:       # %bb.0: # %entry
1336; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1337; CHECK-NEXT:    vmulh.vx v8, v8, a0
1338; CHECK-NEXT:    ret
1339entry:
1340  %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
1341    <vscale x 2 x i16> %0,
1342    i16 %1,
1343    i32 %2)
1344
1345  ret <vscale x 2 x i16> %a
1346}
1347
1348declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
1349  <vscale x 2 x i16>,
1350  <vscale x 2 x i16>,
1351  i16,
1352  <vscale x 2 x i1>,
1353  i32);
1354
1355define <vscale x 2 x i16> @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1356; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16:
1357; CHECK:       # %bb.0: # %entry
1358; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1359; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1360; CHECK-NEXT:    ret
1361entry:
1362  %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
1363    <vscale x 2 x i16> %0,
1364    <vscale x 2 x i16> %1,
1365    i16 %2,
1366    <vscale x 2 x i1> %3,
1367    i32 %4)
1368
1369  ret <vscale x 2 x i16> %a
1370}
1371
1372declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
1373  <vscale x 4 x i16>,
1374  i16,
1375  i32);
1376
1377define <vscale x 4 x i16> @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
1378; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16:
1379; CHECK:       # %bb.0: # %entry
1380; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1381; CHECK-NEXT:    vmulh.vx v8, v8, a0
1382; CHECK-NEXT:    ret
1383entry:
1384  %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
1385    <vscale x 4 x i16> %0,
1386    i16 %1,
1387    i32 %2)
1388
1389  ret <vscale x 4 x i16> %a
1390}
1391
1392declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
1393  <vscale x 4 x i16>,
1394  <vscale x 4 x i16>,
1395  i16,
1396  <vscale x 4 x i1>,
1397  i32);
1398
1399define <vscale x 4 x i16> @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1400; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16:
1401; CHECK:       # %bb.0: # %entry
1402; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
1403; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1404; CHECK-NEXT:    ret
1405entry:
1406  %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
1407    <vscale x 4 x i16> %0,
1408    <vscale x 4 x i16> %1,
1409    i16 %2,
1410    <vscale x 4 x i1> %3,
1411    i32 %4)
1412
1413  ret <vscale x 4 x i16> %a
1414}
1415
1416declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
1417  <vscale x 8 x i16>,
1418  i16,
1419  i32);
1420
1421define <vscale x 8 x i16> @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
1422; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16:
1423; CHECK:       # %bb.0: # %entry
1424; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1425; CHECK-NEXT:    vmulh.vx v8, v8, a0
1426; CHECK-NEXT:    ret
1427entry:
1428  %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
1429    <vscale x 8 x i16> %0,
1430    i16 %1,
1431    i32 %2)
1432
1433  ret <vscale x 8 x i16> %a
1434}
1435
1436declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
1437  <vscale x 8 x i16>,
1438  <vscale x 8 x i16>,
1439  i16,
1440  <vscale x 8 x i1>,
1441  i32);
1442
1443define <vscale x 8 x i16> @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1444; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16:
1445; CHECK:       # %bb.0: # %entry
1446; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
1447; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
1448; CHECK-NEXT:    ret
1449entry:
1450  %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
1451    <vscale x 8 x i16> %0,
1452    <vscale x 8 x i16> %1,
1453    i16 %2,
1454    <vscale x 8 x i1> %3,
1455    i32 %4)
1456
1457  ret <vscale x 8 x i16> %a
1458}
1459
1460declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
1461  <vscale x 16 x i16>,
1462  i16,
1463  i32);
1464
1465define <vscale x 16 x i16> @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
1466; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16:
1467; CHECK:       # %bb.0: # %entry
1468; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1469; CHECK-NEXT:    vmulh.vx v8, v8, a0
1470; CHECK-NEXT:    ret
1471entry:
1472  %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
1473    <vscale x 16 x i16> %0,
1474    i16 %1,
1475    i32 %2)
1476
1477  ret <vscale x 16 x i16> %a
1478}
1479
1480declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
1481  <vscale x 16 x i16>,
1482  <vscale x 16 x i16>,
1483  i16,
1484  <vscale x 16 x i1>,
1485  i32);
1486
1487define <vscale x 16 x i16> @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1488; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16:
1489; CHECK:       # %bb.0: # %entry
1490; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
1491; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
1492; CHECK-NEXT:    ret
1493entry:
1494  %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
1495    <vscale x 16 x i16> %0,
1496    <vscale x 16 x i16> %1,
1497    i16 %2,
1498    <vscale x 16 x i1> %3,
1499    i32 %4)
1500
1501  ret <vscale x 16 x i16> %a
1502}
1503
1504declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
1505  <vscale x 32 x i16>,
1506  i16,
1507  i32);
1508
1509define <vscale x 32 x i16> @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
1510; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16:
1511; CHECK:       # %bb.0: # %entry
1512; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
1513; CHECK-NEXT:    vmulh.vx v8, v8, a0
1514; CHECK-NEXT:    ret
1515entry:
1516  %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
1517    <vscale x 32 x i16> %0,
1518    i16 %1,
1519    i32 %2)
1520
1521  ret <vscale x 32 x i16> %a
1522}
1523
1524declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
1525  <vscale x 32 x i16>,
1526  <vscale x 32 x i16>,
1527  i16,
1528  <vscale x 32 x i1>,
1529  i32);
1530
1531define <vscale x 32 x i16> @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
1532; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16:
1533; CHECK:       # %bb.0: # %entry
1534; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
1535; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
1536; CHECK-NEXT:    ret
1537entry:
1538  %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
1539    <vscale x 32 x i16> %0,
1540    <vscale x 32 x i16> %1,
1541    i16 %2,
1542    <vscale x 32 x i1> %3,
1543    i32 %4)
1544
1545  ret <vscale x 32 x i16> %a
1546}
1547
1548declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
1549  <vscale x 1 x i32>,
1550  i32,
1551  i32);
1552
1553define <vscale x 1 x i32> @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
1554; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32:
1555; CHECK:       # %bb.0: # %entry
1556; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
1557; CHECK-NEXT:    vmulh.vx v8, v8, a0
1558; CHECK-NEXT:    ret
1559entry:
1560  %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
1561    <vscale x 1 x i32> %0,
1562    i32 %1,
1563    i32 %2)
1564
1565  ret <vscale x 1 x i32> %a
1566}
1567
1568declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
1569  <vscale x 1 x i32>,
1570  <vscale x 1 x i32>,
1571  i32,
1572  <vscale x 1 x i1>,
1573  i32);
1574
1575define <vscale x 1 x i32> @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1576; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32:
1577; CHECK:       # %bb.0: # %entry
1578; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
1579; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1580; CHECK-NEXT:    ret
1581entry:
1582  %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
1583    <vscale x 1 x i32> %0,
1584    <vscale x 1 x i32> %1,
1585    i32 %2,
1586    <vscale x 1 x i1> %3,
1587    i32 %4)
1588
1589  ret <vscale x 1 x i32> %a
1590}
1591
1592declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
1593  <vscale x 2 x i32>,
1594  i32,
1595  i32);
1596
1597define <vscale x 2 x i32> @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
1598; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32:
1599; CHECK:       # %bb.0: # %entry
1600; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1601; CHECK-NEXT:    vmulh.vx v8, v8, a0
1602; CHECK-NEXT:    ret
1603entry:
1604  %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
1605    <vscale x 2 x i32> %0,
1606    i32 %1,
1607    i32 %2)
1608
1609  ret <vscale x 2 x i32> %a
1610}
1611
1612declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
1613  <vscale x 2 x i32>,
1614  <vscale x 2 x i32>,
1615  i32,
1616  <vscale x 2 x i1>,
1617  i32);
1618
1619define <vscale x 2 x i32> @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1620; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32:
1621; CHECK:       # %bb.0: # %entry
1622; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
1623; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
1624; CHECK-NEXT:    ret
1625entry:
1626  %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
1627    <vscale x 2 x i32> %0,
1628    <vscale x 2 x i32> %1,
1629    i32 %2,
1630    <vscale x 2 x i1> %3,
1631    i32 %4)
1632
1633  ret <vscale x 2 x i32> %a
1634}
1635
1636declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
1637  <vscale x 4 x i32>,
1638  i32,
1639  i32);
1640
1641define <vscale x 4 x i32> @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
1642; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32:
1643; CHECK:       # %bb.0: # %entry
1644; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1645; CHECK-NEXT:    vmulh.vx v8, v8, a0
1646; CHECK-NEXT:    ret
1647entry:
1648  %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
1649    <vscale x 4 x i32> %0,
1650    i32 %1,
1651    i32 %2)
1652
1653  ret <vscale x 4 x i32> %a
1654}
1655
1656declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
1657  <vscale x 4 x i32>,
1658  <vscale x 4 x i32>,
1659  i32,
1660  <vscale x 4 x i1>,
1661  i32);
1662
1663define <vscale x 4 x i32> @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1664; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32:
1665; CHECK:       # %bb.0: # %entry
1666; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
1667; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
1668; CHECK-NEXT:    ret
1669entry:
1670  %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
1671    <vscale x 4 x i32> %0,
1672    <vscale x 4 x i32> %1,
1673    i32 %2,
1674    <vscale x 4 x i1> %3,
1675    i32 %4)
1676
1677  ret <vscale x 4 x i32> %a
1678}
1679
1680declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
1681  <vscale x 8 x i32>,
1682  i32,
1683  i32);
1684
1685define <vscale x 8 x i32> @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
1686; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32:
1687; CHECK:       # %bb.0: # %entry
1688; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1689; CHECK-NEXT:    vmulh.vx v8, v8, a0
1690; CHECK-NEXT:    ret
1691entry:
1692  %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
1693    <vscale x 8 x i32> %0,
1694    i32 %1,
1695    i32 %2)
1696
1697  ret <vscale x 8 x i32> %a
1698}
1699
1700declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
1701  <vscale x 8 x i32>,
1702  <vscale x 8 x i32>,
1703  i32,
1704  <vscale x 8 x i1>,
1705  i32);
1706
1707define <vscale x 8 x i32> @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1708; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32:
1709; CHECK:       # %bb.0: # %entry
1710; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
1711; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
1712; CHECK-NEXT:    ret
1713entry:
1714  %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
1715    <vscale x 8 x i32> %0,
1716    <vscale x 8 x i32> %1,
1717    i32 %2,
1718    <vscale x 8 x i1> %3,
1719    i32 %4)
1720
1721  ret <vscale x 8 x i32> %a
1722}
1723
1724declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
1725  <vscale x 16 x i32>,
1726  i32,
1727  i32);
1728
1729define <vscale x 16 x i32> @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
1730; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32:
1731; CHECK:       # %bb.0: # %entry
1732; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
1733; CHECK-NEXT:    vmulh.vx v8, v8, a0
1734; CHECK-NEXT:    ret
1735entry:
1736  %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
1737    <vscale x 16 x i32> %0,
1738    i32 %1,
1739    i32 %2)
1740
1741  ret <vscale x 16 x i32> %a
1742}
1743
1744declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
1745  <vscale x 16 x i32>,
1746  <vscale x 16 x i32>,
1747  i32,
1748  <vscale x 16 x i1>,
1749  i32);
1750
1751define <vscale x 16 x i32> @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1752; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32:
1753; CHECK:       # %bb.0: # %entry
1754; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
1755; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
1756; CHECK-NEXT:    ret
1757entry:
1758  %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
1759    <vscale x 16 x i32> %0,
1760    <vscale x 16 x i32> %1,
1761    i32 %2,
1762    <vscale x 16 x i1> %3,
1763    i32 %4)
1764
1765  ret <vscale x 16 x i32> %a
1766}
1767
1768declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
1769  <vscale x 1 x i64>,
1770  i64,
1771  i32);
1772
; Vector-scalar test with an i64 scalar on rv32: the 64-bit scalar arrives in
; the a1:a0 register pair, so no single .vx scalar register can hold it.
; Expected codegen spills the pair to the stack and splats it with a
; zero-stride vlse64.v, then uses the .vv form. NOTE: CHECK lines are
; autogenerated by update_llc_test_checks.py — do not edit them by hand.
define <vscale x 1 x i64> @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v25, (a0), zero
; CHECK-NEXT:    vmulh.vv v8, v8, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}
1793
1794declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
1795  <vscale x 1 x i64>,
1796  <vscale x 1 x i64>,
1797  i64,
1798  <vscale x 1 x i1>,
1799  i32);
1800
; Masked variant of the rv32 i64-scalar case: the scalar is splatted from the
; stack with vlse64.v under a tail-agnostic vsetvli, then a second vsetvli
; (same AVL/type, via zero,zero) switches to tail-undisturbed policy for the
; masked vmulh.vv so the passthru in v8 is preserved. NOTE: CHECK lines are
; autogenerated by update_llc_test_checks.py — do not edit them by hand.
define <vscale x 1 x i64> @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v25, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v9, v25, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}
1824
1825declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
1826  <vscale x 2 x i64>,
1827  i64,
1828  i32);
1829
1830define <vscale x 2 x i64> @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
1831; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64:
1832; CHECK:       # %bb.0: # %entry
1833; CHECK-NEXT:    addi sp, sp, -16
1834; CHECK-NEXT:    sw a1, 12(sp)
1835; CHECK-NEXT:    sw a0, 8(sp)
1836; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
1837; CHECK-NEXT:    addi a0, sp, 8
1838; CHECK-NEXT:    vlse64.v v26, (a0), zero
1839; CHECK-NEXT:    vmulh.vv v8, v8, v26
1840; CHECK-NEXT:    addi sp, sp, 16
1841; CHECK-NEXT:    ret
1842entry:
1843  %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
1844    <vscale x 2 x i64> %0,
1845    i64 %1,
1846    i32 %2)
1847
1848  ret <vscale x 2 x i64> %a
1849}
1850
1851declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
1852  <vscale x 2 x i64>,
1853  <vscale x 2 x i64>,
1854  i64,
1855  <vscale x 2 x i1>,
1856  i32);
1857
; Masked vector-scalar vmulh, i64 element, LMUL=2.  The split i64 scalar is
; splatted from the stack into v26 as in the unmasked case; a second vsetvli
; then switches the policy to tail-undisturbed (tu) before the masked
; vmulh.vv, so tail elements (and, under mu, masked-off elements) keep the
; value already in the destination group v8, which holds the merge operand %0.
; %1 (the vector operand) is in v10 per the calling convention for m2 groups.
define <vscale x 2 x i64> @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v10, v26, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}
1881
1882declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
1883  <vscale x 4 x i64>,
1884  i64,
1885  i32);
1886
; Unmasked vector-scalar vmulh, i64 element, LMUL=4.  Same rv32 expansion as
; the m2 case: the split i64 scalar (a1:a0) is stored to sp+8 and splatted
; into v28 via zero-stride vlse64.v, and the vector-vector form is used.
define <vscale x 4 x i64> @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vmulh.vv v8, v8, v28
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}
1907
1908declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
1909  <vscale x 4 x i64>,
1910  <vscale x 4 x i64>,
1911  i64,
1912  <vscale x 4 x i1>,
1913  i32);
1914
; Masked vector-scalar vmulh, i64 element, LMUL=4.  Scalar splatted from the
; stack into v28, then vsetvli switches to tail-undisturbed (tu) before the
; masked vmulh.vv so undisturbed elements keep the merge operand %0 already in
; the destination group v8.  %1 is in v12 per the convention for m4 groups.
define <vscale x 4 x i64> @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v12, v28, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}
1938
1939declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
1940  <vscale x 8 x i64>,
1941  i64,
1942  i32);
1943
; Unmasked vector-scalar vmulh, i64 element, LMUL=8.  Same rv32 expansion:
; the split i64 scalar (a1:a0) is stored to sp+8 and splatted into v16 via
; zero-stride vlse64.v, then the vector-vector form of vmulh is used.
define <vscale x 8 x i64> @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v16, (a0), zero
; CHECK-NEXT:    vmulh.vv v8, v8, v16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}
1964
1965declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
1966  <vscale x 8 x i64>,
1967  <vscale x 8 x i64>,
1968  i64,
1969  <vscale x 8 x i1>,
1970  i32);
1971
; Masked vector-scalar vmulh, i64 element, LMUL=8.  Scalar splatted from the
; stack into v24, then vsetvli switches to tail-undisturbed (tu) before the
; masked vmulh.vv so undisturbed elements keep the merge operand %0 already in
; the destination group v8.  %1 is in v16 per the convention for m8 groups.
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v24, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}
1995