// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
6 
7 //
8 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
9 // CHECK-RV64-NEXT:  entry:
10 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
11 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
12 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
13 //
vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
  return vamoaddei8_v_i32mf2(base, bindex, value, vl);
}
17 
18 //
19 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
20 // CHECK-RV64-NEXT:  entry:
21 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
22 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
23 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
24 //
vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
  return vamoaddei8_v_i32m1(base, bindex, value, vl);
}
28 
29 //
30 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
31 // CHECK-RV64-NEXT:  entry:
32 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
33 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
34 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
35 //
vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
  return vamoaddei8_v_i32m2(base, bindex, value, vl);
}
39 
40 //
41 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
42 // CHECK-RV64-NEXT:  entry:
43 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
44 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
45 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
46 //
vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
  return vamoaddei8_v_i32m4(base, bindex, value, vl);
}
50 
51 //
52 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
53 // CHECK-RV64-NEXT:  entry:
54 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
55 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
56 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
57 //
vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
  return vamoaddei8_v_i32m8(base, bindex, value, vl);
}
61 
62 //
63 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
64 // CHECK-RV64-NEXT:  entry:
65 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
66 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
67 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
68 //
vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
  return vamoaddei16_v_i32mf2(base, bindex, value, vl);
}
72 
73 //
74 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
75 // CHECK-RV64-NEXT:  entry:
76 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
77 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
78 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
79 //
vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
  return vamoaddei16_v_i32m1(base, bindex, value, vl);
}
83 
84 //
85 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
86 // CHECK-RV64-NEXT:  entry:
87 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
88 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
89 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
90 //
vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
  return vamoaddei16_v_i32m2(base, bindex, value, vl);
}
94 
95 //
96 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
97 // CHECK-RV64-NEXT:  entry:
98 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
99 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
101 //
vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
  return vamoaddei16_v_i32m4(base, bindex, value, vl);
}
105 
106 //
107 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
108 // CHECK-RV64-NEXT:  entry:
109 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
110 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
112 //
vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
  return vamoaddei16_v_i32m8(base, bindex, value, vl);
}
116 
117 //
118 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
119 // CHECK-RV64-NEXT:  entry:
120 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
121 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
122 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
123 //
vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
  return vamoaddei32_v_i32mf2(base, bindex, value, vl);
}
127 
128 //
129 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
130 // CHECK-RV64-NEXT:  entry:
131 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
132 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
133 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
134 //
vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
  return vamoaddei32_v_i32m1(base, bindex, value, vl);
}
138 
139 //
140 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
141 // CHECK-RV64-NEXT:  entry:
142 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
143 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
144 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
145 //
vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
  return vamoaddei32_v_i32m2(base, bindex, value, vl);
}
149 
150 //
151 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
152 // CHECK-RV64-NEXT:  entry:
153 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
154 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
155 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
156 //
vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
  return vamoaddei32_v_i32m4(base, bindex, value, vl);
}
160 
161 //
162 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
163 // CHECK-RV64-NEXT:  entry:
164 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
165 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
166 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
167 //
vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
  return vamoaddei32_v_i32m8(base, bindex, value, vl);
}
171 
172 //
173 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
174 // CHECK-RV64-NEXT:  entry:
175 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
176 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
177 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
178 //
vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
  return vamoaddei64_v_i32mf2(base, bindex, value, vl);
}
182 
183 //
184 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
185 // CHECK-RV64-NEXT:  entry:
186 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
187 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
188 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
189 //
vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
  return vamoaddei64_v_i32m1(base, bindex, value, vl);
}
193 
194 //
195 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
196 // CHECK-RV64-NEXT:  entry:
197 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
198 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
199 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
200 //
vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
  return vamoaddei64_v_i32m2(base, bindex, value, vl);
}
204 
205 //
206 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
207 // CHECK-RV64-NEXT:  entry:
208 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
209 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
210 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
211 //
vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
  return vamoaddei64_v_i32m4(base, bindex, value, vl);
}
215 
216 //
217 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
218 // CHECK-RV64-NEXT:  entry:
219 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
220 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
221 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
222 //
vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
  return vamoaddei8_v_i64m1(base, bindex, value, vl);
}
226 
227 //
228 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
229 // CHECK-RV64-NEXT:  entry:
230 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
231 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
232 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
233 //
vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
  return vamoaddei8_v_i64m2(base, bindex, value, vl);
}
237 
238 //
239 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
240 // CHECK-RV64-NEXT:  entry:
241 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
242 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
243 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
244 //
vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
  return vamoaddei8_v_i64m4(base, bindex, value, vl);
}
248 
249 //
250 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
251 // CHECK-RV64-NEXT:  entry:
252 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
253 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
254 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
255 //
vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
  return vamoaddei8_v_i64m8(base, bindex, value, vl);
}
259 
260 //
261 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
262 // CHECK-RV64-NEXT:  entry:
263 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
264 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
265 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
266 //
vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
  return vamoaddei16_v_i64m1(base, bindex, value, vl);
}
270 
271 //
272 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
273 // CHECK-RV64-NEXT:  entry:
274 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
275 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
276 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
277 //
vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
  return vamoaddei16_v_i64m2(base, bindex, value, vl);
}
281 
282 //
283 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
284 // CHECK-RV64-NEXT:  entry:
285 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
286 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
287 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
288 //
vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
  return vamoaddei16_v_i64m4(base, bindex, value, vl);
}
292 
293 //
294 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
295 // CHECK-RV64-NEXT:  entry:
296 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
297 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
298 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
299 //
vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
  return vamoaddei16_v_i64m8(base, bindex, value, vl);
}
303 
304 //
305 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
306 // CHECK-RV64-NEXT:  entry:
307 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
308 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
309 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
310 //
vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
  return vamoaddei32_v_i64m1(base, bindex, value, vl);
}
314 
315 //
316 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2(
317 // CHECK-RV64-NEXT:  entry:
318 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
319 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
320 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
321 //
vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
  return vamoaddei32_v_i64m2(base, bindex, value, vl);
}
325 
326 //
327 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4(
328 // CHECK-RV64-NEXT:  entry:
329 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
330 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
331 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
332 //
vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
  return vamoaddei32_v_i64m4(base, bindex, value, vl);
}
336 
337 //
338 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8(
339 // CHECK-RV64-NEXT:  entry:
340 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
341 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
342 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
343 //
vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
  return vamoaddei32_v_i64m8(base, bindex, value, vl);
}
347 
348 //
349 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1(
350 // CHECK-RV64-NEXT:  entry:
351 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
352 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
353 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
354 //
vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
  return vamoaddei64_v_i64m1(base, bindex, value, vl);
}
358 
359 //
360 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2(
361 // CHECK-RV64-NEXT:  entry:
362 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
363 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
364 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
365 //
vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
  return vamoaddei64_v_i64m2(base, bindex, value, vl);
}
369 
370 //
371 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4(
372 // CHECK-RV64-NEXT:  entry:
373 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
374 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
375 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
376 //
vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
  return vamoaddei64_v_i64m4(base, bindex, value, vl);
}
380 
381 //
382 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8(
383 // CHECK-RV64-NEXT:  entry:
384 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
385 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
386 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
387 //
vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
  return vamoaddei64_v_i64m8(base, bindex, value, vl);
}
391 
392 //
393 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2(
394 // CHECK-RV64-NEXT:  entry:
395 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
396 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
397 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
398 //
vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoaddei8_v_u32mf2(base, bindex, value, vl);
}
402 
403 //
404 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1(
405 // CHECK-RV64-NEXT:  entry:
406 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
407 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
408 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
409 //
vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
  return vamoaddei8_v_u32m1(base, bindex, value, vl);
}
413 
414 //
415 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2(
416 // CHECK-RV64-NEXT:  entry:
417 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
418 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
419 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
420 //
vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
  return vamoaddei8_v_u32m2(base, bindex, value, vl);
}
424 
425 //
426 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4(
427 // CHECK-RV64-NEXT:  entry:
428 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
429 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
430 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
431 //
vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
  return vamoaddei8_v_u32m4(base, bindex, value, vl);
}
435 
436 //
437 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8(
438 // CHECK-RV64-NEXT:  entry:
439 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
440 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
441 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
442 //
vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
  return vamoaddei8_v_u32m8(base, bindex, value, vl);
}
446 
447 //
448 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2(
449 // CHECK-RV64-NEXT:  entry:
450 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
451 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
452 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
453 //
vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoaddei16_v_u32mf2(base, bindex, value, vl);
}
457 
458 //
459 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1(
460 // CHECK-RV64-NEXT:  entry:
461 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
462 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
463 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
464 //
vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
  return vamoaddei16_v_u32m1(base, bindex, value, vl);
}
468 
469 //
470 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2(
471 // CHECK-RV64-NEXT:  entry:
472 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
473 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
474 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
475 //
vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
  return vamoaddei16_v_u32m2(base, bindex, value, vl);
}
479 
480 //
481 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4(
482 // CHECK-RV64-NEXT:  entry:
483 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
484 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
485 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
486 //
vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
  return vamoaddei16_v_u32m4(base, bindex, value, vl);
}
490 
491 //
492 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8(
493 // CHECK-RV64-NEXT:  entry:
494 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
495 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
496 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
497 //
vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
  return vamoaddei16_v_u32m8(base, bindex, value, vl);
}
501 
502 //
503 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2(
504 // CHECK-RV64-NEXT:  entry:
505 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
506 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
507 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
508 //
vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoaddei32_v_u32mf2(base, bindex, value, vl);
}
512 
513 //
514 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1(
515 // CHECK-RV64-NEXT:  entry:
516 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
517 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
518 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
519 //
vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
  return vamoaddei32_v_u32m1(base, bindex, value, vl);
}
523 
524 //
525 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2(
526 // CHECK-RV64-NEXT:  entry:
527 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
528 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
529 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
530 //
vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
  return vamoaddei32_v_u32m2(base, bindex, value, vl);
}
534 
535 //
536 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4(
537 // CHECK-RV64-NEXT:  entry:
538 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
539 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
540 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
541 //
vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
  return vamoaddei32_v_u32m4(base, bindex, value, vl);
}
545 
546 //
547 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8(
548 // CHECK-RV64-NEXT:  entry:
549 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
550 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
551 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
552 //
vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
  return vamoaddei32_v_u32m8(base, bindex, value, vl);
}
556 
557 //
558 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2(
559 // CHECK-RV64-NEXT:  entry:
560 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
561 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
562 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
563 //
vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoaddei64_v_u32mf2(base, bindex, value, vl);
}
567 
568 //
569 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1(
570 // CHECK-RV64-NEXT:  entry:
571 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
572 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
573 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
574 //
vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
  return vamoaddei64_v_u32m1(base, bindex, value, vl);
}
578 
579 //
580 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2(
581 // CHECK-RV64-NEXT:  entry:
582 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
583 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
584 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
585 //
vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
  return vamoaddei64_v_u32m2(base, bindex, value, vl);
}
589 
590 //
591 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4(
592 // CHECK-RV64-NEXT:  entry:
593 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
594 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
595 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
596 //
vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
  return vamoaddei64_v_u32m4(base, bindex, value, vl);
}
600 
601 //
602 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1(
603 // CHECK-RV64-NEXT:  entry:
604 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
605 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
606 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
607 //
vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
  return vamoaddei8_v_u64m1(base, bindex, value, vl);
}
611 
612 //
613 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2(
614 // CHECK-RV64-NEXT:  entry:
615 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
616 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
617 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
618 //
vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
  return vamoaddei8_v_u64m2(base, bindex, value, vl);
}
622 
623 //
624 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4(
625 // CHECK-RV64-NEXT:  entry:
626 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
627 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
628 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
629 //
vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
  return vamoaddei8_v_u64m4(base, bindex, value, vl);
}
633 
634 //
635 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8(
636 // CHECK-RV64-NEXT:  entry:
637 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
638 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
639 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
640 //
vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
  return vamoaddei8_v_u64m8(base, bindex, value, vl);
}
644 
645 //
646 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1(
647 // CHECK-RV64-NEXT:  entry:
648 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
649 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
650 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
651 //
vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
  return vamoaddei16_v_u64m1(base, bindex, value, vl);
}
655 
656 //
657 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2(
658 // CHECK-RV64-NEXT:  entry:
659 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
660 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
661 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
662 //
vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
  return vamoaddei16_v_u64m2(base, bindex, value, vl);
}
666 
667 //
668 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4(
669 // CHECK-RV64-NEXT:  entry:
670 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
671 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
672 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
673 //
vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
  return vamoaddei16_v_u64m4(base, bindex, value, vl);
}
677 
678 //
679 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8(
680 // CHECK-RV64-NEXT:  entry:
681 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
682 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
683 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
684 //
vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
  return vamoaddei16_v_u64m8(base, bindex, value, vl);
}
688 
689 //
690 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1(
691 // CHECK-RV64-NEXT:  entry:
692 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
693 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
694 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
695 //
vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
  return vamoaddei32_v_u64m1(base, bindex, value, vl);
}
699 
700 //
701 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2(
702 // CHECK-RV64-NEXT:  entry:
703 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
704 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
705 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
706 //
vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
  return vamoaddei32_v_u64m2(base, bindex, value, vl);
}
710 
711 //
712 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4(
713 // CHECK-RV64-NEXT:  entry:
714 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
715 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
716 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
717 //
vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
  return vamoaddei32_v_u64m4(base, bindex, value, vl);
}
721 
722 //
723 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8(
724 // CHECK-RV64-NEXT:  entry:
725 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
726 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
727 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
728 //
vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
  return vamoaddei32_v_u64m8(base, bindex, value, vl);
}
732 
733 //
734 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1(
735 // CHECK-RV64-NEXT:  entry:
736 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
737 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
738 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
739 //
vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
  return vamoaddei64_v_u64m1(base, bindex, value, vl);
}
743 
744 //
745 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2(
746 // CHECK-RV64-NEXT:  entry:
747 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
748 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
749 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
750 //
vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
  return vamoaddei64_v_u64m2(base, bindex, value, vl);
}
754 
755 //
756 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4(
757 // CHECK-RV64-NEXT:  entry:
758 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
759 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
760 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
761 //
vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
  return vamoaddei64_v_u64m4(base, bindex, value, vl);
}
765 
766 //
767 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8(
768 // CHECK-RV64-NEXT:  entry:
769 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
770 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
771 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
772 //
vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
  return vamoaddei64_v_u64m8(base, bindex, value, vl);
}
776 
777 //
778 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
779 // CHECK-RV64-NEXT:  entry:
780 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
781 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
782 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
783 //
vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
  return vamoaddei8_v_i32mf2_m(mask, base, bindex, value, vl);
}
787 
788 //
789 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
790 // CHECK-RV64-NEXT:  entry:
791 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
792 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
793 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
794 //
vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
  return vamoaddei8_v_i32m1_m(mask, base, bindex, value, vl);
}
798 
799 //
800 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
801 // CHECK-RV64-NEXT:  entry:
802 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
803 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
804 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
805 //
vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
  return vamoaddei8_v_i32m2_m(mask, base, bindex, value, vl);
}
809 
810 //
811 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
812 // CHECK-RV64-NEXT:  entry:
813 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
814 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
815 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
816 //
vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
  return vamoaddei8_v_i32m4_m(mask, base, bindex, value, vl);
}
820 
821 //
822 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
823 // CHECK-RV64-NEXT:  entry:
824 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
825 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
826 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
827 //
vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
  return vamoaddei8_v_i32m8_m(mask, base, bindex, value, vl);
}
831 
832 //
833 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
834 // CHECK-RV64-NEXT:  entry:
835 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
836 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
837 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
838 //
vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
  return vamoaddei16_v_i32mf2_m(mask, base, bindex, value, vl);
}
842 
843 //
844 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
845 // CHECK-RV64-NEXT:  entry:
846 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
847 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
848 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
849 //
850 vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
851   return vamoaddei16_v_i32m1_m(mask, base, bindex, value, vl);
852 }
853 
854 //
855 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
856 // CHECK-RV64-NEXT:  entry:
857 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
858 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
859 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
860 //
861 vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
862   return vamoaddei16_v_i32m2_m(mask, base, bindex, value, vl);
863 }
864 
865 //
866 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
867 // CHECK-RV64-NEXT:  entry:
868 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
869 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
870 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
871 //
872 vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
873   return vamoaddei16_v_i32m4_m(mask, base, bindex, value, vl);
874 }
875 
876 //
877 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
878 // CHECK-RV64-NEXT:  entry:
879 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
880 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
881 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
882 //
883 vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
884   return vamoaddei16_v_i32m8_m(mask, base, bindex, value, vl);
885 }
886 
887 //
888 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
889 // CHECK-RV64-NEXT:  entry:
890 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
891 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
892 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
893 //
894 vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
895   return vamoaddei32_v_i32mf2_m(mask, base, bindex, value, vl);
896 }
897 
898 //
899 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
900 // CHECK-RV64-NEXT:  entry:
901 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
902 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
903 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
904 //
905 vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
906   return vamoaddei32_v_i32m1_m(mask, base, bindex, value, vl);
907 }
908 
909 //
910 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
911 // CHECK-RV64-NEXT:  entry:
912 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
913 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
914 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
915 //
916 vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
917   return vamoaddei32_v_i32m2_m(mask, base, bindex, value, vl);
918 }
919 
920 //
921 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
922 // CHECK-RV64-NEXT:  entry:
923 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
924 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
925 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
926 //
927 vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
928   return vamoaddei32_v_i32m4_m(mask, base, bindex, value, vl);
929 }
930 
931 //
932 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
933 // CHECK-RV64-NEXT:  entry:
934 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
935 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
936 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
937 //
938 vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
939   return vamoaddei32_v_i32m8_m(mask, base, bindex, value, vl);
940 }
941 
942 //
943 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
944 // CHECK-RV64-NEXT:  entry:
945 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
946 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
947 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
948 //
949 vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
950   return vamoaddei64_v_i32mf2_m(mask, base, bindex, value, vl);
951 }
952 
953 //
954 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
955 // CHECK-RV64-NEXT:  entry:
956 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
957 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
958 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
959 //
960 vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
961   return vamoaddei64_v_i32m1_m(mask, base, bindex, value, vl);
962 }
963 
964 //
965 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
966 // CHECK-RV64-NEXT:  entry:
967 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
968 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
969 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
970 //
971 vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
972   return vamoaddei64_v_i32m2_m(mask, base, bindex, value, vl);
973 }
974 
975 //
976 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
977 // CHECK-RV64-NEXT:  entry:
978 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
979 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
980 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
981 //
982 vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
983   return vamoaddei64_v_i32m4_m(mask, base, bindex, value, vl);
984 }
985 
986 //
987 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
988 // CHECK-RV64-NEXT:  entry:
989 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
990 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
991 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
992 //
993 vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
994   return vamoaddei8_v_i64m1_m(mask, base, bindex, value, vl);
995 }
996 
997 //
998 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
999 // CHECK-RV64-NEXT:  entry:
1000 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1001 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1002 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1003 //
1004 vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
1005   return vamoaddei8_v_i64m2_m(mask, base, bindex, value, vl);
1006 }
1007 
1008 //
1009 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
1010 // CHECK-RV64-NEXT:  entry:
1011 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1012 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1013 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1014 //
1015 vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
1016   return vamoaddei8_v_i64m4_m(mask, base, bindex, value, vl);
1017 }
1018 
1019 //
1020 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
1021 // CHECK-RV64-NEXT:  entry:
1022 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1023 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1024 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1025 //
1026 vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
1027   return vamoaddei8_v_i64m8_m(mask, base, bindex, value, vl);
1028 }
1029 
1030 //
1031 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
1032 // CHECK-RV64-NEXT:  entry:
1033 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1034 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1035 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1036 //
1037 vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
1038   return vamoaddei16_v_i64m1_m(mask, base, bindex, value, vl);
1039 }
1040 
1041 //
1042 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
1043 // CHECK-RV64-NEXT:  entry:
1044 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1045 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1046 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1047 //
1048 vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
1049   return vamoaddei16_v_i64m2_m(mask, base, bindex, value, vl);
1050 }
1051 
1052 //
1053 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
1054 // CHECK-RV64-NEXT:  entry:
1055 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1056 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1057 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1058 //
1059 vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
1060   return vamoaddei16_v_i64m4_m(mask, base, bindex, value, vl);
1061 }
1062 
1063 //
1064 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
1065 // CHECK-RV64-NEXT:  entry:
1066 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1067 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1068 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1069 //
1070 vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
1071   return vamoaddei16_v_i64m8_m(mask, base, bindex, value, vl);
1072 }
1073 
1074 //
1075 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
1076 // CHECK-RV64-NEXT:  entry:
1077 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1078 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1079 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1080 //
1081 vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
1082   return vamoaddei32_v_i64m1_m(mask, base, bindex, value, vl);
1083 }
1084 
1085 //
1086 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
1087 // CHECK-RV64-NEXT:  entry:
1088 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1089 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1090 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1091 //
1092 vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
1093   return vamoaddei32_v_i64m2_m(mask, base, bindex, value, vl);
1094 }
1095 
1096 //
1097 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
1098 // CHECK-RV64-NEXT:  entry:
1099 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1100 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1101 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1102 //
1103 vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
1104   return vamoaddei32_v_i64m4_m(mask, base, bindex, value, vl);
1105 }
1106 
1107 //
1108 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
1109 // CHECK-RV64-NEXT:  entry:
1110 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1111 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1112 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1113 //
1114 vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
1115   return vamoaddei32_v_i64m8_m(mask, base, bindex, value, vl);
1116 }
1117 
1118 //
1119 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
1120 // CHECK-RV64-NEXT:  entry:
1121 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1122 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1123 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1124 //
1125 vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
1126   return vamoaddei64_v_i64m1_m(mask, base, bindex, value, vl);
1127 }
1128 
1129 //
1130 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
1131 // CHECK-RV64-NEXT:  entry:
1132 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1133 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1134 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1135 //
1136 vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
1137   return vamoaddei64_v_i64m2_m(mask, base, bindex, value, vl);
1138 }
1139 
1140 //
1141 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
1142 // CHECK-RV64-NEXT:  entry:
1143 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1144 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1145 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1146 //
1147 vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
1148   return vamoaddei64_v_i64m4_m(mask, base, bindex, value, vl);
1149 }
1150 
1151 //
1152 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
1153 // CHECK-RV64-NEXT:  entry:
1154 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1155 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1156 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1157 //
1158 vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
1159   return vamoaddei64_v_i64m8_m(mask, base, bindex, value, vl);
1160 }
1161 
1162 //
1163 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
1164 // CHECK-RV64-NEXT:  entry:
1165 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1166 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1167 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1168 //
1169 vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
1170   return vamoaddei8_v_u32mf2_m(mask, base, bindex, value, vl);
1171 }
1172 
1173 //
1174 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
1175 // CHECK-RV64-NEXT:  entry:
1176 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1177 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1178 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1179 //
1180 vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
1181   return vamoaddei8_v_u32m1_m(mask, base, bindex, value, vl);
1182 }
1183 
1184 //
1185 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
1186 // CHECK-RV64-NEXT:  entry:
1187 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1188 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1189 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1190 //
1191 vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
1192   return vamoaddei8_v_u32m2_m(mask, base, bindex, value, vl);
1193 }
1194 
1195 //
1196 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
1197 // CHECK-RV64-NEXT:  entry:
1198 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1199 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1200 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1201 //
1202 vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
1203   return vamoaddei8_v_u32m4_m(mask, base, bindex, value, vl);
1204 }
1205 
1206 //
1207 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
1208 // CHECK-RV64-NEXT:  entry:
1209 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1210 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1211 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1212 //
1213 vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
1214   return vamoaddei8_v_u32m8_m(mask, base, bindex, value, vl);
1215 }
1216 
1217 //
1218 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
1219 // CHECK-RV64-NEXT:  entry:
1220 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1221 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1222 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1223 //
1224 vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
1225   return vamoaddei16_v_u32mf2_m(mask, base, bindex, value, vl);
1226 }
1227 
1228 //
1229 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
1230 // CHECK-RV64-NEXT:  entry:
1231 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1232 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1233 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1234 //
1235 vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
1236   return vamoaddei16_v_u32m1_m(mask, base, bindex, value, vl);
1237 }
1238 
1239 //
1240 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
1241 // CHECK-RV64-NEXT:  entry:
1242 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1243 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1244 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1245 //
1246 vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
1247   return vamoaddei16_v_u32m2_m(mask, base, bindex, value, vl);
1248 }
1249 
1250 //
1251 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
1252 // CHECK-RV64-NEXT:  entry:
1253 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1254 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1255 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1256 //
1257 vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
1258   return vamoaddei16_v_u32m4_m(mask, base, bindex, value, vl);
1259 }
1260 
1261 //
1262 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
1263 // CHECK-RV64-NEXT:  entry:
1264 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1265 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1266 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1267 //
1268 vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
1269   return vamoaddei16_v_u32m8_m(mask, base, bindex, value, vl);
1270 }
1271 
1272 //
1273 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
1274 // CHECK-RV64-NEXT:  entry:
1275 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1276 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1277 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1278 //
1279 vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
1280   return vamoaddei32_v_u32mf2_m(mask, base, bindex, value, vl);
1281 }
1282 
1283 //
1284 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
1285 // CHECK-RV64-NEXT:  entry:
1286 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1287 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1288 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1289 //
1290 vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
1291   return vamoaddei32_v_u32m1_m(mask, base, bindex, value, vl);
1292 }
1293 
1294 //
1295 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
1296 // CHECK-RV64-NEXT:  entry:
1297 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1298 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1299 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1300 //
1301 vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
1302   return vamoaddei32_v_u32m2_m(mask, base, bindex, value, vl);
1303 }
1304 
1305 //
1306 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
1307 // CHECK-RV64-NEXT:  entry:
1308 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1309 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1310 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1311 //
1312 vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
1313   return vamoaddei32_v_u32m4_m(mask, base, bindex, value, vl);
1314 }
1315 
1316 //
1317 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
1318 // CHECK-RV64-NEXT:  entry:
1319 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1320 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1321 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1322 //
1323 vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
1324   return vamoaddei32_v_u32m8_m(mask, base, bindex, value, vl);
1325 }
1326 
1327 //
1328 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
1329 // CHECK-RV64-NEXT:  entry:
1330 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1331 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1332 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1333 //
1334 vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
1335   return vamoaddei64_v_u32mf2_m(mask, base, bindex, value, vl);
1336 }
1337 
1338 //
1339 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
1340 // CHECK-RV64-NEXT:  entry:
1341 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1342 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1343 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1344 //
1345 vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
1346   return vamoaddei64_v_u32m1_m(mask, base, bindex, value, vl);
1347 }
1348 
1349 //
1350 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
1351 // CHECK-RV64-NEXT:  entry:
1352 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1353 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1354 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1355 //
1356 vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
1357   return vamoaddei64_v_u32m2_m(mask, base, bindex, value, vl);
1358 }
1359 
1360 //
1361 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
1362 // CHECK-RV64-NEXT:  entry:
1363 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1364 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1365 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1366 //
1367 vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
1368   return vamoaddei64_v_u32m4_m(mask, base, bindex, value, vl);
1369 }
1370 
1371 //
1372 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
1373 // CHECK-RV64-NEXT:  entry:
1374 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1375 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1376 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1377 //
1378 vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
1379   return vamoaddei8_v_u64m1_m(mask, base, bindex, value, vl);
1380 }
1381 
1382 //
1383 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
1384 // CHECK-RV64-NEXT:  entry:
1385 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1386 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1387 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1388 //
1389 vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
1390   return vamoaddei8_v_u64m2_m(mask, base, bindex, value, vl);
1391 }
1392 
1393 //
1394 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
1395 // CHECK-RV64-NEXT:  entry:
1396 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1397 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1398 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1399 //
1400 vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
1401   return vamoaddei8_v_u64m4_m(mask, base, bindex, value, vl);
1402 }
1403 
1404 //
1405 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
1406 // CHECK-RV64-NEXT:  entry:
1407 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1408 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1409 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1410 //
1411 vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
1412   return vamoaddei8_v_u64m8_m(mask, base, bindex, value, vl);
1413 }
1414 
1415 //
1416 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
1417 // CHECK-RV64-NEXT:  entry:
1418 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1419 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1420 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1421 //
1422 vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
1423   return vamoaddei16_v_u64m1_m(mask, base, bindex, value, vl);
1424 }
1425 
1426 //
1427 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
1428 // CHECK-RV64-NEXT:  entry:
1429 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1430 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1431 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1432 //
1433 vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
1434   return vamoaddei16_v_u64m2_m(mask, base, bindex, value, vl);
1435 }
1436 
1437 //
1438 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
1439 // CHECK-RV64-NEXT:  entry:
1440 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1441 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1442 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1443 //
1444 vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
1445   return vamoaddei16_v_u64m4_m(mask, base, bindex, value, vl);
1446 }
1447 
1448 //
1449 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m(
1450 // CHECK-RV64-NEXT:  entry:
1451 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1452 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1453 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1454 //
1455 vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
1456   return vamoaddei16_v_u64m8_m(mask, base, bindex, value, vl);
1457 }
1458 
1459 //
1460 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m(
1461 // CHECK-RV64-NEXT:  entry:
1462 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1463 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1464 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1465 //
1466 vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
1467   return vamoaddei32_v_u64m1_m(mask, base, bindex, value, vl);
1468 }
1469 
1470 //
1471 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m(
1472 // CHECK-RV64-NEXT:  entry:
1473 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1474 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1475 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1476 //
1477 vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
1478   return vamoaddei32_v_u64m2_m(mask, base, bindex, value, vl);
1479 }
1480 
1481 //
1482 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m(
1483 // CHECK-RV64-NEXT:  entry:
1484 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1485 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1486 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1487 //
1488 vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
1489   return vamoaddei32_v_u64m4_m(mask, base, bindex, value, vl);
1490 }
1491 
1492 //
1493 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m(
1494 // CHECK-RV64-NEXT:  entry:
1495 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1496 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1497 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1498 //
1499 vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
1500   return vamoaddei32_v_u64m8_m(mask, base, bindex, value, vl);
1501 }
1502 
1503 //
1504 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m(
1505 // CHECK-RV64-NEXT:  entry:
1506 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1507 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1508 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1509 //
1510 vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
1511   return vamoaddei64_v_u64m1_m(mask, base, bindex, value, vl);
1512 }
1513 
1514 //
1515 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m(
1516 // CHECK-RV64-NEXT:  entry:
1517 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1518 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1519 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1520 //
1521 vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
1522   return vamoaddei64_v_u64m2_m(mask, base, bindex, value, vl);
1523 }
1524 
1525 //
1526 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m(
1527 // CHECK-RV64-NEXT:  entry:
1528 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1529 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1530 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1531 //
1532 vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
1533   return vamoaddei64_v_u64m4_m(mask, base, bindex, value, vl);
1534 }
1535 
1536 //
1537 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m(
1538 // CHECK-RV64-NEXT:  entry:
1539 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1540 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1541 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1542 //
1543 vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
1544   return vamoaddei64_v_u64m8_m(mask, base, bindex, value, vl);
1545 }
1546 
1547