; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
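; The tests below exercise the llvm.riscv.vamoadd intrinsics: for each index
; element width (ei64, ei32, ei16, ei8), both the unmasked and masked forms
; are covered over i32 and i64 result element types, checking that each call
; lowers to the corresponding vamoaddei<EEW>.v instruction under a
; tail-undisturbed vsetvli.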
declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
  <vscale x 1 x i32>*,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>*,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
  <vscale x 2 x i32>*,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>*,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
  <vscale x 4 x i32>*,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>*,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
  <vscale x 8 x i32>*,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>*,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>*,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>*,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
  <vscale x 2 x i64>*,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>*,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
  <vscale x 4 x i64>*,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>*,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
  <vscale x 8 x i64>*,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>*,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
  <vscale x 1 x i32>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
  <vscale x 2 x i32>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
  <vscale x 4 x i32>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
  <vscale x 8 x i32>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
  <vscale x 16 x i32>*,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i64);

define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    i64 %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>*,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
  <vscale x 1 x i64>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
  <vscale x 2 x i64>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
  <vscale x 4 x i64>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
  <vscale x 8 x i64>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
  <vscale x 1 x i32>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i32> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
  <vscale x 2 x i32>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i32> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
  <vscale x 4 x i32>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i32> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
  <vscale x 8 x i32>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i32> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
  <vscale x 16 x i32>*,
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  i64);

define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i32> %2,
    i64 %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>*,
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
  <vscale x 1 x i64>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
  <vscale x 2 x i64>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
  <vscale x 4 x i64>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
  <vscale x 8 x i64>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
  <vscale x 1 x i32>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i32> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
  <vscale x 2 x i32>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i32> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
  <vscale x 4 x i32>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i32> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
  <vscale x 8 x i32>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i32> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
  <vscale x 16 x i32>*,
  <vscale x 16 x i8>,
  <vscale x 16 x i32>,
  i64);

define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i32> %2,
    i64 %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>*,
  <vscale x 16 x i8>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
  <vscale x 1 x i64>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
  <vscale x 2 x i64>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
  <vscale x 2 x i64>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
  <vscale x 4 x i64>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
  <vscale x 4 x i64>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
  <vscale x 8 x i64>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
  <vscale x 8 x i64>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}