// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -target-feature +experimental-zfh -target-feature +experimental-zvamo -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

8 //
9 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
10 // CHECK-RV64-NEXT:  entry:
11 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
12 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
13 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
14 //
test_vamoswapei8_v_i32mf2(int32_t * base,vuint8mf8_t bindex,vint32mf2_t value,size_t vl)15 vint32mf2_t test_vamoswapei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
16   return vamoswapei8_v_i32mf2(base, bindex, value, vl);
17 }
18 
19 //
20 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
21 // CHECK-RV64-NEXT:  entry:
22 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
23 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
24 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
25 //
test_vamoswapei8_v_i32m1(int32_t * base,vuint8mf4_t bindex,vint32m1_t value,size_t vl)26 vint32m1_t test_vamoswapei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
27   return vamoswapei8_v_i32m1(base, bindex, value, vl);
28 }
29 
30 //
31 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2(
32 // CHECK-RV64-NEXT:  entry:
33 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
34 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
35 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
36 //
test_vamoswapei8_v_i32m2(int32_t * base,vuint8mf2_t bindex,vint32m2_t value,size_t vl)37 vint32m2_t test_vamoswapei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
38   return vamoswapei8_v_i32m2(base, bindex, value, vl);
39 }
40 
41 //
42 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4(
43 // CHECK-RV64-NEXT:  entry:
44 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
45 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
46 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
47 //
test_vamoswapei8_v_i32m4(int32_t * base,vuint8m1_t bindex,vint32m4_t value,size_t vl)48 vint32m4_t test_vamoswapei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
49   return vamoswapei8_v_i32m4(base, bindex, value, vl);
50 }
51 
52 //
53 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8(
54 // CHECK-RV64-NEXT:  entry:
55 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
56 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
57 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
58 //
test_vamoswapei8_v_i32m8(int32_t * base,vuint8m2_t bindex,vint32m8_t value,size_t vl)59 vint32m8_t test_vamoswapei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
60   return vamoswapei8_v_i32m8(base, bindex, value, vl);
61 }
62 
63 //
64 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2(
65 // CHECK-RV64-NEXT:  entry:
66 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
67 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
68 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
69 //
test_vamoswapei16_v_i32mf2(int32_t * base,vuint16mf4_t bindex,vint32mf2_t value,size_t vl)70 vint32mf2_t test_vamoswapei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
71   return vamoswapei16_v_i32mf2(base, bindex, value, vl);
72 }
73 
74 //
75 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1(
76 // CHECK-RV64-NEXT:  entry:
77 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
78 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
79 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
80 //
test_vamoswapei16_v_i32m1(int32_t * base,vuint16mf2_t bindex,vint32m1_t value,size_t vl)81 vint32m1_t test_vamoswapei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
82   return vamoswapei16_v_i32m1(base, bindex, value, vl);
83 }
84 
85 //
86 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2(
87 // CHECK-RV64-NEXT:  entry:
88 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
89 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
90 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
91 //
test_vamoswapei16_v_i32m2(int32_t * base,vuint16m1_t bindex,vint32m2_t value,size_t vl)92 vint32m2_t test_vamoswapei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
93   return vamoswapei16_v_i32m2(base, bindex, value, vl);
94 }
95 
96 //
97 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4(
98 // CHECK-RV64-NEXT:  entry:
99 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
100 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
101 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
102 //
test_vamoswapei16_v_i32m4(int32_t * base,vuint16m2_t bindex,vint32m4_t value,size_t vl)103 vint32m4_t test_vamoswapei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
104   return vamoswapei16_v_i32m4(base, bindex, value, vl);
105 }
106 
107 //
108 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8(
109 // CHECK-RV64-NEXT:  entry:
110 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
111 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
112 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
113 //
test_vamoswapei16_v_i32m8(int32_t * base,vuint16m4_t bindex,vint32m8_t value,size_t vl)114 vint32m8_t test_vamoswapei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
115   return vamoswapei16_v_i32m8(base, bindex, value, vl);
116 }
117 
118 //
119 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2(
120 // CHECK-RV64-NEXT:  entry:
121 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
122 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
123 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
124 //
test_vamoswapei32_v_i32mf2(int32_t * base,vuint32mf2_t bindex,vint32mf2_t value,size_t vl)125 vint32mf2_t test_vamoswapei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
126   return vamoswapei32_v_i32mf2(base, bindex, value, vl);
127 }
128 
129 //
130 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1(
131 // CHECK-RV64-NEXT:  entry:
132 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
133 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
134 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
135 //
test_vamoswapei32_v_i32m1(int32_t * base,vuint32m1_t bindex,vint32m1_t value,size_t vl)136 vint32m1_t test_vamoswapei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
137   return vamoswapei32_v_i32m1(base, bindex, value, vl);
138 }
139 
140 //
141 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2(
142 // CHECK-RV64-NEXT:  entry:
143 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
144 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
145 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
146 //
test_vamoswapei32_v_i32m2(int32_t * base,vuint32m2_t bindex,vint32m2_t value,size_t vl)147 vint32m2_t test_vamoswapei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
148   return vamoswapei32_v_i32m2(base, bindex, value, vl);
149 }
150 
151 //
152 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4(
153 // CHECK-RV64-NEXT:  entry:
154 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
155 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
156 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
157 //
test_vamoswapei32_v_i32m4(int32_t * base,vuint32m4_t bindex,vint32m4_t value,size_t vl)158 vint32m4_t test_vamoswapei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
159   return vamoswapei32_v_i32m4(base, bindex, value, vl);
160 }
161 
162 //
163 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8(
164 // CHECK-RV64-NEXT:  entry:
165 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
166 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
167 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
168 //
test_vamoswapei32_v_i32m8(int32_t * base,vuint32m8_t bindex,vint32m8_t value,size_t vl)169 vint32m8_t test_vamoswapei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
170   return vamoswapei32_v_i32m8(base, bindex, value, vl);
171 }
172 
173 //
174 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2(
175 // CHECK-RV64-NEXT:  entry:
176 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
177 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
178 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
179 //
test_vamoswapei64_v_i32mf2(int32_t * base,vuint64m1_t bindex,vint32mf2_t value,size_t vl)180 vint32mf2_t test_vamoswapei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
181   return vamoswapei64_v_i32mf2(base, bindex, value, vl);
182 }
183 
184 //
185 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1(
186 // CHECK-RV64-NEXT:  entry:
187 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
188 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
189 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
190 //
test_vamoswapei64_v_i32m1(int32_t * base,vuint64m2_t bindex,vint32m1_t value,size_t vl)191 vint32m1_t test_vamoswapei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
192   return vamoswapei64_v_i32m1(base, bindex, value, vl);
193 }
194 
195 //
196 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2(
197 // CHECK-RV64-NEXT:  entry:
198 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
199 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
200 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
201 //
test_vamoswapei64_v_i32m2(int32_t * base,vuint64m4_t bindex,vint32m2_t value,size_t vl)202 vint32m2_t test_vamoswapei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
203   return vamoswapei64_v_i32m2(base, bindex, value, vl);
204 }
205 
206 //
207 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4(
208 // CHECK-RV64-NEXT:  entry:
209 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
210 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
211 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
212 //
test_vamoswapei64_v_i32m4(int32_t * base,vuint64m8_t bindex,vint32m4_t value,size_t vl)213 vint32m4_t test_vamoswapei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
214   return vamoswapei64_v_i32m4(base, bindex, value, vl);
215 }
216 
217 //
218 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1(
219 // CHECK-RV64-NEXT:  entry:
220 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
221 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
222 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
223 //
test_vamoswapei8_v_i64m1(int64_t * base,vuint8mf8_t bindex,vint64m1_t value,size_t vl)224 vint64m1_t test_vamoswapei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
225   return vamoswapei8_v_i64m1(base, bindex, value, vl);
226 }
227 
228 //
229 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2(
230 // CHECK-RV64-NEXT:  entry:
231 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
232 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
233 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
234 //
test_vamoswapei8_v_i64m2(int64_t * base,vuint8mf4_t bindex,vint64m2_t value,size_t vl)235 vint64m2_t test_vamoswapei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
236   return vamoswapei8_v_i64m2(base, bindex, value, vl);
237 }
238 
239 //
240 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4(
241 // CHECK-RV64-NEXT:  entry:
242 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
243 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
244 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
245 //
test_vamoswapei8_v_i64m4(int64_t * base,vuint8mf2_t bindex,vint64m4_t value,size_t vl)246 vint64m4_t test_vamoswapei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
247   return vamoswapei8_v_i64m4(base, bindex, value, vl);
248 }
249 
250 //
251 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8(
252 // CHECK-RV64-NEXT:  entry:
253 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
254 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
255 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
256 //
test_vamoswapei8_v_i64m8(int64_t * base,vuint8m1_t bindex,vint64m8_t value,size_t vl)257 vint64m8_t test_vamoswapei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
258   return vamoswapei8_v_i64m8(base, bindex, value, vl);
259 }
260 
261 //
262 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1(
263 // CHECK-RV64-NEXT:  entry:
264 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
265 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
266 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
267 //
test_vamoswapei16_v_i64m1(int64_t * base,vuint16mf4_t bindex,vint64m1_t value,size_t vl)268 vint64m1_t test_vamoswapei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
269   return vamoswapei16_v_i64m1(base, bindex, value, vl);
270 }
271 
272 //
273 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2(
274 // CHECK-RV64-NEXT:  entry:
275 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
276 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
277 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
278 //
test_vamoswapei16_v_i64m2(int64_t * base,vuint16mf2_t bindex,vint64m2_t value,size_t vl)279 vint64m2_t test_vamoswapei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
280   return vamoswapei16_v_i64m2(base, bindex, value, vl);
281 }
282 
283 //
284 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4(
285 // CHECK-RV64-NEXT:  entry:
286 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
287 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
288 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
289 //
test_vamoswapei16_v_i64m4(int64_t * base,vuint16m1_t bindex,vint64m4_t value,size_t vl)290 vint64m4_t test_vamoswapei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
291   return vamoswapei16_v_i64m4(base, bindex, value, vl);
292 }
293 
294 //
295 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8(
296 // CHECK-RV64-NEXT:  entry:
297 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
298 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
299 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
300 //
test_vamoswapei16_v_i64m8(int64_t * base,vuint16m2_t bindex,vint64m8_t value,size_t vl)301 vint64m8_t test_vamoswapei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
302   return vamoswapei16_v_i64m8(base, bindex, value, vl);
303 }
304 
305 //
306 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1(
307 // CHECK-RV64-NEXT:  entry:
308 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
309 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
310 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
311 //
test_vamoswapei32_v_i64m1(int64_t * base,vuint32mf2_t bindex,vint64m1_t value,size_t vl)312 vint64m1_t test_vamoswapei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
313   return vamoswapei32_v_i64m1(base, bindex, value, vl);
314 }
315 
316 //
317 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2(
318 // CHECK-RV64-NEXT:  entry:
319 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
320 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
321 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
322 //
test_vamoswapei32_v_i64m2(int64_t * base,vuint32m1_t bindex,vint64m2_t value,size_t vl)323 vint64m2_t test_vamoswapei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
324   return vamoswapei32_v_i64m2(base, bindex, value, vl);
325 }
326 
327 //
328 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4(
329 // CHECK-RV64-NEXT:  entry:
330 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
331 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
332 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
333 //
test_vamoswapei32_v_i64m4(int64_t * base,vuint32m2_t bindex,vint64m4_t value,size_t vl)334 vint64m4_t test_vamoswapei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
335   return vamoswapei32_v_i64m4(base, bindex, value, vl);
336 }
337 
338 //
339 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8(
340 // CHECK-RV64-NEXT:  entry:
341 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
342 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
343 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
344 //
test_vamoswapei32_v_i64m8(int64_t * base,vuint32m4_t bindex,vint64m8_t value,size_t vl)345 vint64m8_t test_vamoswapei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
346   return vamoswapei32_v_i64m8(base, bindex, value, vl);
347 }
348 
349 //
350 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1(
351 // CHECK-RV64-NEXT:  entry:
352 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
353 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
354 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
355 //
test_vamoswapei64_v_i64m1(int64_t * base,vuint64m1_t bindex,vint64m1_t value,size_t vl)356 vint64m1_t test_vamoswapei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
357   return vamoswapei64_v_i64m1(base, bindex, value, vl);
358 }
359 
360 //
361 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2(
362 // CHECK-RV64-NEXT:  entry:
363 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
364 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
365 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
366 //
test_vamoswapei64_v_i64m2(int64_t * base,vuint64m2_t bindex,vint64m2_t value,size_t vl)367 vint64m2_t test_vamoswapei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
368   return vamoswapei64_v_i64m2(base, bindex, value, vl);
369 }
370 
371 //
372 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4(
373 // CHECK-RV64-NEXT:  entry:
374 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
375 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
376 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
377 //
test_vamoswapei64_v_i64m4(int64_t * base,vuint64m4_t bindex,vint64m4_t value,size_t vl)378 vint64m4_t test_vamoswapei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
379   return vamoswapei64_v_i64m4(base, bindex, value, vl);
380 }
381 
382 //
383 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8(
384 // CHECK-RV64-NEXT:  entry:
385 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
386 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
387 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
388 //
test_vamoswapei64_v_i64m8(int64_t * base,vuint64m8_t bindex,vint64m8_t value,size_t vl)389 vint64m8_t test_vamoswapei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
390   return vamoswapei64_v_i64m8(base, bindex, value, vl);
391 }
392 
393 //
394 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2(
395 // CHECK-RV64-NEXT:  entry:
396 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
397 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
398 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
399 //
test_vamoswapei8_v_u32mf2(uint32_t * base,vuint8mf8_t bindex,vuint32mf2_t value,size_t vl)400 vuint32mf2_t test_vamoswapei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
401   return vamoswapei8_v_u32mf2(base, bindex, value, vl);
402 }
403 
404 //
405 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1(
406 // CHECK-RV64-NEXT:  entry:
407 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
408 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
409 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
410 //
test_vamoswapei8_v_u32m1(uint32_t * base,vuint8mf4_t bindex,vuint32m1_t value,size_t vl)411 vuint32m1_t test_vamoswapei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
412   return vamoswapei8_v_u32m1(base, bindex, value, vl);
413 }
414 
415 //
416 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2(
417 // CHECK-RV64-NEXT:  entry:
418 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
419 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
420 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
421 //
test_vamoswapei8_v_u32m2(uint32_t * base,vuint8mf2_t bindex,vuint32m2_t value,size_t vl)422 vuint32m2_t test_vamoswapei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
423   return vamoswapei8_v_u32m2(base, bindex, value, vl);
424 }
425 
426 //
427 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4(
428 // CHECK-RV64-NEXT:  entry:
429 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
430 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
431 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
432 //
test_vamoswapei8_v_u32m4(uint32_t * base,vuint8m1_t bindex,vuint32m4_t value,size_t vl)433 vuint32m4_t test_vamoswapei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
434   return vamoswapei8_v_u32m4(base, bindex, value, vl);
435 }
436 
437 //
438 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8(
439 // CHECK-RV64-NEXT:  entry:
440 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
441 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
442 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
443 //
test_vamoswapei8_v_u32m8(uint32_t * base,vuint8m2_t bindex,vuint32m8_t value,size_t vl)444 vuint32m8_t test_vamoswapei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
445   return vamoswapei8_v_u32m8(base, bindex, value, vl);
446 }
447 
448 //
449 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2(
450 // CHECK-RV64-NEXT:  entry:
451 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
452 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
453 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
454 //
test_vamoswapei16_v_u32mf2(uint32_t * base,vuint16mf4_t bindex,vuint32mf2_t value,size_t vl)455 vuint32mf2_t test_vamoswapei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
456   return vamoswapei16_v_u32mf2(base, bindex, value, vl);
457 }
458 
459 //
460 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1(
461 // CHECK-RV64-NEXT:  entry:
462 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
463 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
464 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
465 //
test_vamoswapei16_v_u32m1(uint32_t * base,vuint16mf2_t bindex,vuint32m1_t value,size_t vl)466 vuint32m1_t test_vamoswapei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
467   return vamoswapei16_v_u32m1(base, bindex, value, vl);
468 }
469 
470 //
471 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2(
472 // CHECK-RV64-NEXT:  entry:
473 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
474 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
475 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
476 //
// IR check: vamoswapei16_v_u32m2 — indexed AMO swap, u32 data / u16 indices; forwards the intrinsic's result.
vuint32m2_t test_vamoswapei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
  return vamoswapei16_v_u32m2(base, bindex, value, vl);
}
480 
481 //
482 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4(
483 // CHECK-RV64-NEXT:  entry:
484 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
485 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
486 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
487 //
// IR check: vamoswapei16_v_u32m4 — indexed AMO swap, u32 data / u16 indices; forwards the intrinsic's result.
vuint32m4_t test_vamoswapei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
  return vamoswapei16_v_u32m4(base, bindex, value, vl);
}
491 
492 //
493 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8(
494 // CHECK-RV64-NEXT:  entry:
495 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
496 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
497 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
498 //
// IR check: vamoswapei16_v_u32m8 — indexed AMO swap, u32 data / u16 indices; forwards the intrinsic's result.
vuint32m8_t test_vamoswapei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
  return vamoswapei16_v_u32m8(base, bindex, value, vl);
}
502 
503 //
504 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2(
505 // CHECK-RV64-NEXT:  entry:
506 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
507 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
508 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
509 //
// IR check: vamoswapei32_v_u32mf2 — indexed AMO swap, u32 data / u32 indices; forwards the intrinsic's result.
vuint32mf2_t test_vamoswapei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoswapei32_v_u32mf2(base, bindex, value, vl);
}
513 
514 //
515 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1(
516 // CHECK-RV64-NEXT:  entry:
517 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
518 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
519 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
520 //
// IR check: vamoswapei32_v_u32m1 — indexed AMO swap, u32 data / u32 indices; forwards the intrinsic's result.
vuint32m1_t test_vamoswapei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
  return vamoswapei32_v_u32m1(base, bindex, value, vl);
}
524 
525 //
526 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2(
527 // CHECK-RV64-NEXT:  entry:
528 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
529 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
530 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
531 //
// IR check: vamoswapei32_v_u32m2 — indexed AMO swap, u32 data / u32 indices; forwards the intrinsic's result.
vuint32m2_t test_vamoswapei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
  return vamoswapei32_v_u32m2(base, bindex, value, vl);
}
535 
536 //
537 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4(
538 // CHECK-RV64-NEXT:  entry:
539 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
540 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
541 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
542 //
// IR check: vamoswapei32_v_u32m4 — indexed AMO swap, u32 data / u32 indices; forwards the intrinsic's result.
vuint32m4_t test_vamoswapei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
  return vamoswapei32_v_u32m4(base, bindex, value, vl);
}
546 
547 //
548 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8(
549 // CHECK-RV64-NEXT:  entry:
550 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
551 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
552 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
553 //
// IR check: vamoswapei32_v_u32m8 — indexed AMO swap, u32 data / u32 indices; forwards the intrinsic's result.
vuint32m8_t test_vamoswapei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
  return vamoswapei32_v_u32m8(base, bindex, value, vl);
}
557 
558 //
559 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2(
560 // CHECK-RV64-NEXT:  entry:
561 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
562 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
563 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
564 //
// IR check: vamoswapei64_v_u32mf2 — indexed AMO swap, u32 data / u64 indices; forwards the intrinsic's result.
vuint32mf2_t test_vamoswapei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoswapei64_v_u32mf2(base, bindex, value, vl);
}
568 
569 //
570 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1(
571 // CHECK-RV64-NEXT:  entry:
572 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
573 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
574 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
575 //
// IR check: vamoswapei64_v_u32m1 — indexed AMO swap, u32 data / u64 indices; forwards the intrinsic's result.
vuint32m1_t test_vamoswapei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
  return vamoswapei64_v_u32m1(base, bindex, value, vl);
}
579 
580 //
581 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2(
582 // CHECK-RV64-NEXT:  entry:
583 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
584 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
585 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
586 //
// IR check: vamoswapei64_v_u32m2 — indexed AMO swap, u32 data / u64 indices; forwards the intrinsic's result.
vuint32m2_t test_vamoswapei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
  return vamoswapei64_v_u32m2(base, bindex, value, vl);
}
590 
591 //
592 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4(
593 // CHECK-RV64-NEXT:  entry:
594 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
595 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
596 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
597 //
// IR check: vamoswapei64_v_u32m4 — indexed AMO swap, u32 data / u64 indices; forwards the intrinsic's result.
vuint32m4_t test_vamoswapei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
  return vamoswapei64_v_u32m4(base, bindex, value, vl);
}
601 
602 //
603 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1(
604 // CHECK-RV64-NEXT:  entry:
605 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
606 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
607 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
608 //
// IR check: vamoswapei8_v_u64m1 — indexed AMO swap, u64 data / u8 indices; forwards the intrinsic's result.
vuint64m1_t test_vamoswapei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei8_v_u64m1(base, bindex, value, vl);
}
612 
613 //
614 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2(
615 // CHECK-RV64-NEXT:  entry:
616 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
617 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
618 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
619 //
// IR check: vamoswapei8_v_u64m2 — indexed AMO swap, u64 data / u8 indices; forwards the intrinsic's result.
vuint64m2_t test_vamoswapei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei8_v_u64m2(base, bindex, value, vl);
}
623 
624 //
625 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4(
626 // CHECK-RV64-NEXT:  entry:
627 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
628 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
629 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
630 //
// IR check: vamoswapei8_v_u64m4 — indexed AMO swap, u64 data / u8 indices; forwards the intrinsic's result.
vuint64m4_t test_vamoswapei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei8_v_u64m4(base, bindex, value, vl);
}
634 
635 //
636 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8(
637 // CHECK-RV64-NEXT:  entry:
638 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
639 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
640 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
641 //
// IR check: vamoswapei8_v_u64m8 — indexed AMO swap, u64 data / u8 indices; forwards the intrinsic's result.
vuint64m8_t test_vamoswapei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei8_v_u64m8(base, bindex, value, vl);
}
645 
646 //
647 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1(
648 // CHECK-RV64-NEXT:  entry:
649 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
650 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
651 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
652 //
// IR check: vamoswapei16_v_u64m1 — indexed AMO swap, u64 data / u16 indices; forwards the intrinsic's result.
vuint64m1_t test_vamoswapei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei16_v_u64m1(base, bindex, value, vl);
}
656 
657 //
658 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2(
659 // CHECK-RV64-NEXT:  entry:
660 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
661 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
662 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
663 //
// IR check: vamoswapei16_v_u64m2 — indexed AMO swap, u64 data / u16 indices; forwards the intrinsic's result.
vuint64m2_t test_vamoswapei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei16_v_u64m2(base, bindex, value, vl);
}
667 
668 //
669 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4(
670 // CHECK-RV64-NEXT:  entry:
671 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
672 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
673 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
674 //
// IR check: vamoswapei16_v_u64m4 — indexed AMO swap, u64 data / u16 indices; forwards the intrinsic's result.
vuint64m4_t test_vamoswapei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei16_v_u64m4(base, bindex, value, vl);
}
678 
679 //
680 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8(
681 // CHECK-RV64-NEXT:  entry:
682 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
683 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
684 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
685 //
// IR check: vamoswapei16_v_u64m8 — indexed AMO swap, u64 data / u16 indices; forwards the intrinsic's result.
vuint64m8_t test_vamoswapei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei16_v_u64m8(base, bindex, value, vl);
}
689 
690 //
691 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1(
692 // CHECK-RV64-NEXT:  entry:
693 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
694 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
695 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
696 //
// IR check: vamoswapei32_v_u64m1 — indexed AMO swap, u64 data / u32 indices; forwards the intrinsic's result.
vuint64m1_t test_vamoswapei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei32_v_u64m1(base, bindex, value, vl);
}
700 
701 //
702 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2(
703 // CHECK-RV64-NEXT:  entry:
704 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
705 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
706 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
707 //
// IR check: vamoswapei32_v_u64m2 — indexed AMO swap, u64 data / u32 indices; forwards the intrinsic's result.
vuint64m2_t test_vamoswapei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei32_v_u64m2(base, bindex, value, vl);
}
711 
712 //
713 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4(
714 // CHECK-RV64-NEXT:  entry:
715 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
716 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
717 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
718 //
// IR check: vamoswapei32_v_u64m4 — indexed AMO swap, u64 data / u32 indices; forwards the intrinsic's result.
vuint64m4_t test_vamoswapei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei32_v_u64m4(base, bindex, value, vl);
}
722 
723 //
724 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8(
725 // CHECK-RV64-NEXT:  entry:
726 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
727 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
728 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
729 //
// IR check: vamoswapei32_v_u64m8 — indexed AMO swap, u64 data / u32 indices; forwards the intrinsic's result.
vuint64m8_t test_vamoswapei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei32_v_u64m8(base, bindex, value, vl);
}
733 
734 //
735 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1(
736 // CHECK-RV64-NEXT:  entry:
737 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
738 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
739 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
740 //
// IR check: vamoswapei64_v_u64m1 — indexed AMO swap, u64 data / u64 indices; forwards the intrinsic's result.
vuint64m1_t test_vamoswapei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei64_v_u64m1(base, bindex, value, vl);
}
744 
745 //
746 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2(
747 // CHECK-RV64-NEXT:  entry:
748 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
749 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
750 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
751 //
// IR check: vamoswapei64_v_u64m2 — indexed AMO swap, u64 data / u64 indices; forwards the intrinsic's result.
vuint64m2_t test_vamoswapei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei64_v_u64m2(base, bindex, value, vl);
}
755 
756 //
757 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4(
758 // CHECK-RV64-NEXT:  entry:
759 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
760 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
761 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
762 //
// IR check: vamoswapei64_v_u64m4 — indexed AMO swap, u64 data / u64 indices; forwards the intrinsic's result.
vuint64m4_t test_vamoswapei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei64_v_u64m4(base, bindex, value, vl);
}
766 
767 //
768 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8(
769 // CHECK-RV64-NEXT:  entry:
770 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
771 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
772 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
773 //
// IR check: vamoswapei64_v_u64m8 — indexed AMO swap, u64 data / u64 indices; forwards the intrinsic's result.
vuint64m8_t test_vamoswapei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei64_v_u64m8(base, bindex, value, vl);
}
777 
778 //
779 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2(
780 // CHECK-RV64-NEXT:  entry:
781 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
782 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
783 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
784 //
// IR check: vamoswapei8_v_f32mf2 — indexed AMO swap, f32 data / u8 indices; forwards the intrinsic's result.
vfloat32mf2_t test_vamoswapei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei8_v_f32mf2(base, bindex, value, vl);
}
788 
789 //
790 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1(
791 // CHECK-RV64-NEXT:  entry:
792 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
793 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
794 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
795 //
// IR check: vamoswapei8_v_f32m1 — indexed AMO swap, f32 data / u8 indices; forwards the intrinsic's result.
vfloat32m1_t test_vamoswapei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei8_v_f32m1(base, bindex, value, vl);
}
799 
800 //
801 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2(
802 // CHECK-RV64-NEXT:  entry:
803 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
804 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
805 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
806 //
// IR check: vamoswapei8_v_f32m2 — indexed AMO swap, f32 data / u8 indices; forwards the intrinsic's result.
vfloat32m2_t test_vamoswapei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei8_v_f32m2(base, bindex, value, vl);
}
810 
811 //
812 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4(
813 // CHECK-RV64-NEXT:  entry:
814 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
815 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
816 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
817 //
// IR check: vamoswapei8_v_f32m4 — indexed AMO swap, f32 data / u8 indices; forwards the intrinsic's result.
vfloat32m4_t test_vamoswapei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei8_v_f32m4(base, bindex, value, vl);
}
821 
822 //
823 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8(
824 // CHECK-RV64-NEXT:  entry:
825 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
826 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
827 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
828 //
// IR check: vamoswapei8_v_f32m8 — indexed AMO swap, f32 data / u8 indices; forwards the intrinsic's result.
vfloat32m8_t test_vamoswapei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
  return vamoswapei8_v_f32m8(base, bindex, value, vl);
}
832 
833 //
834 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2(
835 // CHECK-RV64-NEXT:  entry:
836 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
837 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
838 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
839 //
// IR check: vamoswapei16_v_f32mf2 — indexed AMO swap, f32 data / u16 indices; forwards the intrinsic's result.
vfloat32mf2_t test_vamoswapei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei16_v_f32mf2(base, bindex, value, vl);
}
843 
844 //
845 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1(
846 // CHECK-RV64-NEXT:  entry:
847 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
848 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
849 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
850 //
// IR check: vamoswapei16_v_f32m1 — indexed AMO swap, f32 data / u16 indices; forwards the intrinsic's result.
vfloat32m1_t test_vamoswapei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei16_v_f32m1(base, bindex, value, vl);
}
854 
855 //
856 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2(
857 // CHECK-RV64-NEXT:  entry:
858 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
859 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
860 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
861 //
// IR check: vamoswapei16_v_f32m2 — indexed AMO swap, f32 data / u16 indices; forwards the intrinsic's result.
vfloat32m2_t test_vamoswapei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei16_v_f32m2(base, bindex, value, vl);
}
865 
866 //
867 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4(
868 // CHECK-RV64-NEXT:  entry:
869 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
870 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
871 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
872 //
// IR check: vamoswapei16_v_f32m4 — indexed AMO swap, f32 data / u16 indices; forwards the intrinsic's result.
vfloat32m4_t test_vamoswapei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei16_v_f32m4(base, bindex, value, vl);
}
876 
877 //
878 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8(
879 // CHECK-RV64-NEXT:  entry:
880 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
881 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
882 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
883 //
// IR check: vamoswapei16_v_f32m8 — indexed AMO swap, f32 data / u16 indices; forwards the intrinsic's result.
vfloat32m8_t test_vamoswapei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
  return vamoswapei16_v_f32m8(base, bindex, value, vl);
}
887 
888 //
889 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2(
890 // CHECK-RV64-NEXT:  entry:
891 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
892 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
893 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
894 //
// IR check: vamoswapei32_v_f32mf2 — indexed AMO swap, f32 data / u32 indices; forwards the intrinsic's result.
vfloat32mf2_t test_vamoswapei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei32_v_f32mf2(base, bindex, value, vl);
}
898 
899 //
900 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1(
901 // CHECK-RV64-NEXT:  entry:
902 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
903 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
904 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
905 //
// IR check: vamoswapei32_v_f32m1 — indexed AMO swap, f32 data / u32 indices; forwards the intrinsic's result.
vfloat32m1_t test_vamoswapei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei32_v_f32m1(base, bindex, value, vl);
}
909 
910 //
911 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2(
912 // CHECK-RV64-NEXT:  entry:
913 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
914 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
915 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
916 //
// IR check: vamoswapei32_v_f32m2 — indexed AMO swap, f32 data / u32 indices; forwards the intrinsic's result.
vfloat32m2_t test_vamoswapei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei32_v_f32m2(base, bindex, value, vl);
}
920 
921 //
922 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4(
923 // CHECK-RV64-NEXT:  entry:
924 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
925 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
926 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
927 //
// IR check: vamoswapei32_v_f32m4 — indexed AMO swap, f32 data / u32 indices; forwards the intrinsic's result.
vfloat32m4_t test_vamoswapei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei32_v_f32m4(base, bindex, value, vl);
}
931 
932 //
933 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8(
934 // CHECK-RV64-NEXT:  entry:
935 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
936 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
937 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
938 //
// IR check: vamoswapei32_v_f32m8 — indexed AMO swap, f32 data / u32 indices; forwards the intrinsic's result.
vfloat32m8_t test_vamoswapei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
  return vamoswapei32_v_f32m8(base, bindex, value, vl);
}
942 
943 //
944 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2(
945 // CHECK-RV64-NEXT:  entry:
946 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
947 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
948 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
949 //
// IR check: vamoswapei64_v_f32mf2 — indexed AMO swap, f32 data / u64 indices; forwards the intrinsic's result.
vfloat32mf2_t test_vamoswapei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei64_v_f32mf2(base, bindex, value, vl);
}
953 
954 //
955 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1(
956 // CHECK-RV64-NEXT:  entry:
957 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
958 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
959 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
960 //
// Unmasked vamoswap, f32m1 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv2f32.nxv2i64.
vfloat32m1_t test_vamoswapei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei64_v_f32m1(base, bindex, value, vl);
}
964 
965 //
966 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2(
967 // CHECK-RV64-NEXT:  entry:
968 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
969 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
970 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
971 //
// Unmasked vamoswap, f32m2 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv4f32.nxv4i64.
vfloat32m2_t test_vamoswapei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei64_v_f32m2(base, bindex, value, vl);
}
975 
976 //
977 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4(
978 // CHECK-RV64-NEXT:  entry:
979 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
980 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
981 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
982 //
// Unmasked vamoswap, f32m4 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv8f32.nxv8i64.
vfloat32m4_t test_vamoswapei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei64_v_f32m4(base, bindex, value, vl);
}
986 
987 //
988 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1(
989 // CHECK-RV64-NEXT:  entry:
990 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
991 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
992 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
993 //
// Unmasked vamoswap, f64m1 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv1f64.nxv1i8.
vfloat64m1_t test_vamoswapei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
  return vamoswapei8_v_f64m1(base, bindex, value, vl);
}
997 
998 //
999 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2(
1000 // CHECK-RV64-NEXT:  entry:
1001 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
1002 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1003 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
1004 //
// Unmasked vamoswap, f64m2 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv2f64.nxv2i8.
vfloat64m2_t test_vamoswapei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
  return vamoswapei8_v_f64m2(base, bindex, value, vl);
}
1008 
1009 //
1010 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4(
1011 // CHECK-RV64-NEXT:  entry:
1012 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
1013 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1014 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
1015 //
// Unmasked vamoswap, f64m4 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv4f64.nxv4i8.
vfloat64m4_t test_vamoswapei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
  return vamoswapei8_v_f64m4(base, bindex, value, vl);
}
1019 
1020 //
1021 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8(
1022 // CHECK-RV64-NEXT:  entry:
1023 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
1024 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1025 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
1026 //
// Unmasked vamoswap, f64m8 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv8f64.nxv8i8.
vfloat64m8_t test_vamoswapei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
  return vamoswapei8_v_f64m8(base, bindex, value, vl);
}
1030 
1031 //
1032 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1(
1033 // CHECK-RV64-NEXT:  entry:
1034 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
1035 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1036 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
1037 //
// Unmasked vamoswap, f64m1 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv1f64.nxv1i16.
vfloat64m1_t test_vamoswapei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
  return vamoswapei16_v_f64m1(base, bindex, value, vl);
}
1041 
1042 //
1043 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2(
1044 // CHECK-RV64-NEXT:  entry:
1045 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
1046 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1047 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
1048 //
// Unmasked vamoswap, f64m2 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv2f64.nxv2i16.
vfloat64m2_t test_vamoswapei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
  return vamoswapei16_v_f64m2(base, bindex, value, vl);
}
1052 
1053 //
1054 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4(
1055 // CHECK-RV64-NEXT:  entry:
1056 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
1057 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1058 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
1059 //
// Unmasked vamoswap, f64m4 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv4f64.nxv4i16.
vfloat64m4_t test_vamoswapei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
  return vamoswapei16_v_f64m4(base, bindex, value, vl);
}
1063 
1064 //
1065 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8(
1066 // CHECK-RV64-NEXT:  entry:
1067 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
1068 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1069 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
1070 //
// Unmasked vamoswap, f64m8 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv8f64.nxv8i16.
vfloat64m8_t test_vamoswapei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
  return vamoswapei16_v_f64m8(base, bindex, value, vl);
}
1074 
1075 //
1076 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1(
1077 // CHECK-RV64-NEXT:  entry:
1078 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
1079 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1080 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
1081 //
// Unmasked vamoswap, f64m1 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv1f64.nxv1i32.
vfloat64m1_t test_vamoswapei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
  return vamoswapei32_v_f64m1(base, bindex, value, vl);
}
1085 
1086 //
1087 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2(
1088 // CHECK-RV64-NEXT:  entry:
1089 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
1090 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1091 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
1092 //
// Unmasked vamoswap, f64m2 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv2f64.nxv2i32.
vfloat64m2_t test_vamoswapei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
  return vamoswapei32_v_f64m2(base, bindex, value, vl);
}
1096 
1097 //
1098 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4(
1099 // CHECK-RV64-NEXT:  entry:
1100 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
1101 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1102 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
1103 //
// Unmasked vamoswap, f64m4 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv4f64.nxv4i32.
vfloat64m4_t test_vamoswapei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
  return vamoswapei32_v_f64m4(base, bindex, value, vl);
}
1107 
1108 //
1109 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8(
1110 // CHECK-RV64-NEXT:  entry:
1111 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
1112 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1113 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
1114 //
// Unmasked vamoswap, f64m8 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv8f64.nxv8i32.
vfloat64m8_t test_vamoswapei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
  return vamoswapei32_v_f64m8(base, bindex, value, vl);
}
1118 
1119 //
1120 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1(
1121 // CHECK-RV64-NEXT:  entry:
1122 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
1123 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1124 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
1125 //
// Unmasked vamoswap, f64m1 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv1f64.nxv1i64.
vfloat64m1_t test_vamoswapei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
  return vamoswapei64_v_f64m1(base, bindex, value, vl);
}
1129 
1130 //
1131 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2(
1132 // CHECK-RV64-NEXT:  entry:
1133 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
1134 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1135 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
1136 //
// Unmasked vamoswap, f64m2 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv2f64.nxv2i64.
vfloat64m2_t test_vamoswapei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
  return vamoswapei64_v_f64m2(base, bindex, value, vl);
}
1140 
1141 //
1142 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4(
1143 // CHECK-RV64-NEXT:  entry:
1144 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
1145 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1146 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
1147 //
// Unmasked vamoswap, f64m4 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv4f64.nxv4i64.
vfloat64m4_t test_vamoswapei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
  return vamoswapei64_v_f64m4(base, bindex, value, vl);
}
1151 
1152 //
1153 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8(
1154 // CHECK-RV64-NEXT:  entry:
1155 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
1156 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
1157 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
1158 //
// Unmasked vamoswap, f64m8 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.nxv8f64.nxv8i64.
vfloat64m8_t test_vamoswapei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
  return vamoswapei64_v_f64m8(base, bindex, value, vl);
}
1162 
1163 //
1164 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m(
1165 // CHECK-RV64-NEXT:  entry:
1166 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1167 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1168 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1169 //
// Masked vamoswap, i32mf2 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.
vint32mf2_t test_vamoswapei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
  return vamoswapei8_v_i32mf2_m(mask, base, bindex, value, vl);
}
1173 
1174 //
1175 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m(
1176 // CHECK-RV64-NEXT:  entry:
1177 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1178 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1179 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1180 //
// Masked vamoswap, i32m1 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.
vint32m1_t test_vamoswapei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
  return vamoswapei8_v_i32m1_m(mask, base, bindex, value, vl);
}
1184 
1185 //
1186 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m(
1187 // CHECK-RV64-NEXT:  entry:
1188 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1189 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1190 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1191 //
// Masked vamoswap, i32m2 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.
vint32m2_t test_vamoswapei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
  return vamoswapei8_v_i32m2_m(mask, base, bindex, value, vl);
}
1195 
1196 //
1197 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m(
1198 // CHECK-RV64-NEXT:  entry:
1199 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1200 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1201 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1202 //
// Masked vamoswap, i32m4 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.
vint32m4_t test_vamoswapei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
  return vamoswapei8_v_i32m4_m(mask, base, bindex, value, vl);
}
1206 
1207 //
1208 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m(
1209 // CHECK-RV64-NEXT:  entry:
1210 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1211 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1212 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1213 //
// Masked vamoswap, i32m8 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.
vint32m8_t test_vamoswapei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
  return vamoswapei8_v_i32m8_m(mask, base, bindex, value, vl);
}
1217 
1218 //
1219 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m(
1220 // CHECK-RV64-NEXT:  entry:
1221 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1222 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1223 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1224 //
// Masked vamoswap, i32mf2 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.
vint32mf2_t test_vamoswapei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
  return vamoswapei16_v_i32mf2_m(mask, base, bindex, value, vl);
}
1228 
1229 //
1230 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m(
1231 // CHECK-RV64-NEXT:  entry:
1232 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1233 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1234 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1235 //
// Masked vamoswap, i32m1 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.
vint32m1_t test_vamoswapei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
  return vamoswapei16_v_i32m1_m(mask, base, bindex, value, vl);
}
1239 
1240 //
1241 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m(
1242 // CHECK-RV64-NEXT:  entry:
1243 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1244 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1245 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1246 //
// Masked vamoswap, i32m2 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.
vint32m2_t test_vamoswapei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
  return vamoswapei16_v_i32m2_m(mask, base, bindex, value, vl);
}
1250 
1251 //
1252 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m(
1253 // CHECK-RV64-NEXT:  entry:
1254 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1255 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1256 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1257 //
// Masked vamoswap, i32m4 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.
vint32m4_t test_vamoswapei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
  return vamoswapei16_v_i32m4_m(mask, base, bindex, value, vl);
}
1261 
1262 //
1263 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m(
1264 // CHECK-RV64-NEXT:  entry:
1265 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1266 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1267 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1268 //
// Masked vamoswap, i32m8 value with 16-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.
vint32m8_t test_vamoswapei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
  return vamoswapei16_v_i32m8_m(mask, base, bindex, value, vl);
}
1272 
1273 //
1274 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m(
1275 // CHECK-RV64-NEXT:  entry:
1276 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1277 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1278 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1279 //
// Masked vamoswap, i32mf2 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.
vint32mf2_t test_vamoswapei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
  return vamoswapei32_v_i32mf2_m(mask, base, bindex, value, vl);
}
1283 
1284 //
1285 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m(
1286 // CHECK-RV64-NEXT:  entry:
1287 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1288 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1289 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1290 //
// Masked vamoswap, i32m1 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.
vint32m1_t test_vamoswapei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
  return vamoswapei32_v_i32m1_m(mask, base, bindex, value, vl);
}
1294 
1295 //
1296 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m(
1297 // CHECK-RV64-NEXT:  entry:
1298 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1299 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1300 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1301 //
// Masked vamoswap, i32m2 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.
vint32m2_t test_vamoswapei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
  return vamoswapei32_v_i32m2_m(mask, base, bindex, value, vl);
}
1305 
1306 //
1307 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m(
1308 // CHECK-RV64-NEXT:  entry:
1309 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1310 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1311 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1312 //
// Masked vamoswap, i32m4 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.
vint32m4_t test_vamoswapei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
  return vamoswapei32_v_i32m4_m(mask, base, bindex, value, vl);
}
1316 
1317 //
1318 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m(
1319 // CHECK-RV64-NEXT:  entry:
1320 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1321 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1322 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1323 //
// Masked vamoswap, i32m8 value with 32-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.
vint32m8_t test_vamoswapei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
  return vamoswapei32_v_i32m8_m(mask, base, bindex, value, vl);
}
1327 
1328 //
1329 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m(
1330 // CHECK-RV64-NEXT:  entry:
1331 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1332 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1333 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1334 //
// Masked vamoswap, i32mf2 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.
vint32mf2_t test_vamoswapei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
  return vamoswapei64_v_i32mf2_m(mask, base, bindex, value, vl);
}
1338 
1339 //
1340 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m(
1341 // CHECK-RV64-NEXT:  entry:
1342 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1343 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1344 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1345 //
// Masked vamoswap, i32m1 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.
vint32m1_t test_vamoswapei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
  return vamoswapei64_v_i32m1_m(mask, base, bindex, value, vl);
}
1349 
1350 //
1351 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m(
1352 // CHECK-RV64-NEXT:  entry:
1353 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1354 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1355 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1356 //
// Masked vamoswap, i32m2 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.
vint32m2_t test_vamoswapei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
  return vamoswapei64_v_i32m2_m(mask, base, bindex, value, vl);
}
1360 
1361 //
1362 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m(
1363 // CHECK-RV64-NEXT:  entry:
1364 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1365 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1366 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1367 //
// Masked vamoswap, i32m4 value with 64-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.
vint32m4_t test_vamoswapei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
  return vamoswapei64_v_i32m4_m(mask, base, bindex, value, vl);
}
1371 
1372 //
1373 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m(
1374 // CHECK-RV64-NEXT:  entry:
1375 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1376 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1377 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1378 //
// Masked vamoswap, i64m1 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.
vint64m1_t test_vamoswapei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
  return vamoswapei8_v_i64m1_m(mask, base, bindex, value, vl);
}
1382 
1383 //
1384 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m(
1385 // CHECK-RV64-NEXT:  entry:
1386 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1387 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1388 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1389 //
// Masked vamoswap, i64m2 value with 8-bit indices; CHECK lines above expect
// a call to @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.
vint64m2_t test_vamoswapei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
  return vamoswapei8_v_i64m2_m(mask, base, bindex, value, vl);
}
1393 
1394 //
1395 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m(
1396 // CHECK-RV64-NEXT:  entry:
1397 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1398 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1399 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1400 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64 (verified by the CHECK lines above).
vint64m4_t test_vamoswapei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
  return vamoswapei8_v_i64m4_m(mask, base, bindex, value, vl);
}
1404 
1405 //
1406 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
1407 // CHECK-RV64-NEXT:  entry:
1408 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1409 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1410 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1411 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64 (verified by the CHECK lines above).
vint64m8_t test_vamoswapei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
  return vamoswapei8_v_i64m8_m(mask, base, bindex, value, vl);
}
1415 
1416 //
1417 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
1418 // CHECK-RV64-NEXT:  entry:
1419 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1420 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1421 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1422 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64 (verified by the CHECK lines above).
vint64m1_t test_vamoswapei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
  return vamoswapei16_v_i64m1_m(mask, base, bindex, value, vl);
}
1426 
1427 //
1428 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
1429 // CHECK-RV64-NEXT:  entry:
1430 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1431 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1432 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1433 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64 (verified by the CHECK lines above).
vint64m2_t test_vamoswapei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
  return vamoswapei16_v_i64m2_m(mask, base, bindex, value, vl);
}
1437 
1438 //
1439 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
1440 // CHECK-RV64-NEXT:  entry:
1441 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1442 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1443 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1444 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64 (verified by the CHECK lines above).
vint64m4_t test_vamoswapei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
  return vamoswapei16_v_i64m4_m(mask, base, bindex, value, vl);
}
1448 
1449 //
1450 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
1451 // CHECK-RV64-NEXT:  entry:
1452 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1453 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1454 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1455 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64 (verified by the CHECK lines above).
vint64m8_t test_vamoswapei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
  return vamoswapei16_v_i64m8_m(mask, base, bindex, value, vl);
}
1459 
1460 //
1461 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
1462 // CHECK-RV64-NEXT:  entry:
1463 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1464 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1465 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1466 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64 (verified by the CHECK lines above).
vint64m1_t test_vamoswapei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
  return vamoswapei32_v_i64m1_m(mask, base, bindex, value, vl);
}
1470 
1471 //
1472 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
1473 // CHECK-RV64-NEXT:  entry:
1474 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1475 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1476 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1477 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64 (verified by the CHECK lines above).
vint64m2_t test_vamoswapei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
  return vamoswapei32_v_i64m2_m(mask, base, bindex, value, vl);
}
1481 
1482 //
1483 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
1484 // CHECK-RV64-NEXT:  entry:
1485 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1486 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1487 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1488 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64 (verified by the CHECK lines above).
vint64m4_t test_vamoswapei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
  return vamoswapei32_v_i64m4_m(mask, base, bindex, value, vl);
}
1492 
1493 //
1494 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
1495 // CHECK-RV64-NEXT:  entry:
1496 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1497 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1498 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1499 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64 (verified by the CHECK lines above).
vint64m8_t test_vamoswapei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
  return vamoswapei32_v_i64m8_m(mask, base, bindex, value, vl);
}
1503 
1504 //
1505 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
1506 // CHECK-RV64-NEXT:  entry:
1507 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1508 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1509 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1510 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64 (verified by the CHECK lines above).
vint64m1_t test_vamoswapei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
  return vamoswapei64_v_i64m1_m(mask, base, bindex, value, vl);
}
1514 
1515 //
1516 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
1517 // CHECK-RV64-NEXT:  entry:
1518 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1519 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1520 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1521 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64 (verified by the CHECK lines above).
vint64m2_t test_vamoswapei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
  return vamoswapei64_v_i64m2_m(mask, base, bindex, value, vl);
}
1525 
1526 //
1527 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
1528 // CHECK-RV64-NEXT:  entry:
1529 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1530 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1531 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1532 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64 (verified by the CHECK lines above).
vint64m4_t test_vamoswapei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
  return vamoswapei64_v_i64m4_m(mask, base, bindex, value, vl);
}
1536 
1537 //
1538 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
1539 // CHECK-RV64-NEXT:  entry:
1540 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1541 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1542 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1543 //
// Masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64 (verified by the CHECK lines above).
vint64m8_t test_vamoswapei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
  return vamoswapei64_v_i64m8_m(mask, base, bindex, value, vl);
}
1547 
1548 //
1549 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
1550 // CHECK-RV64-NEXT:  entry:
1551 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1552 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1553 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1554 //
// Unsigned masked form: same IR as the signed variant — @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64 (see CHECK lines above).
vuint32mf2_t test_vamoswapei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoswapei8_v_u32mf2_m(mask, base, bindex, value, vl);
}
1558 
1559 //
1560 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
1561 // CHECK-RV64-NEXT:  entry:
1562 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1563 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1564 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1565 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64 (verified by the CHECK lines above).
vuint32m1_t test_vamoswapei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
  return vamoswapei8_v_u32m1_m(mask, base, bindex, value, vl);
}
1569 
1570 //
1571 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
1572 // CHECK-RV64-NEXT:  entry:
1573 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1574 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1575 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1576 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64 (verified by the CHECK lines above).
vuint32m2_t test_vamoswapei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
  return vamoswapei8_v_u32m2_m(mask, base, bindex, value, vl);
}
1580 
1581 //
1582 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
1583 // CHECK-RV64-NEXT:  entry:
1584 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1585 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1586 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1587 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64 (verified by the CHECK lines above).
vuint32m4_t test_vamoswapei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
  return vamoswapei8_v_u32m4_m(mask, base, bindex, value, vl);
}
1591 
1592 //
1593 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
1594 // CHECK-RV64-NEXT:  entry:
1595 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1596 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1597 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1598 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64 (verified by the CHECK lines above).
vuint32m8_t test_vamoswapei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
  return vamoswapei8_v_u32m8_m(mask, base, bindex, value, vl);
}
1602 
1603 //
1604 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
1605 // CHECK-RV64-NEXT:  entry:
1606 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1607 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1608 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1609 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64 (verified by the CHECK lines above).
vuint32mf2_t test_vamoswapei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoswapei16_v_u32mf2_m(mask, base, bindex, value, vl);
}
1613 
1614 //
1615 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
1616 // CHECK-RV64-NEXT:  entry:
1617 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1618 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1619 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1620 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64 (verified by the CHECK lines above).
vuint32m1_t test_vamoswapei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
  return vamoswapei16_v_u32m1_m(mask, base, bindex, value, vl);
}
1624 
1625 //
1626 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
1627 // CHECK-RV64-NEXT:  entry:
1628 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1629 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1630 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1631 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64 (verified by the CHECK lines above).
vuint32m2_t test_vamoswapei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
  return vamoswapei16_v_u32m2_m(mask, base, bindex, value, vl);
}
1635 
1636 //
1637 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
1638 // CHECK-RV64-NEXT:  entry:
1639 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1640 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1641 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1642 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64 (verified by the CHECK lines above).
vuint32m4_t test_vamoswapei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
  return vamoswapei16_v_u32m4_m(mask, base, bindex, value, vl);
}
1646 
1647 //
1648 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
1649 // CHECK-RV64-NEXT:  entry:
1650 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1651 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1652 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1653 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64 (verified by the CHECK lines above).
vuint32m8_t test_vamoswapei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
  return vamoswapei16_v_u32m8_m(mask, base, bindex, value, vl);
}
1657 
1658 //
1659 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
1660 // CHECK-RV64-NEXT:  entry:
1661 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1662 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1663 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1664 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64 (verified by the CHECK lines above).
vuint32mf2_t test_vamoswapei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoswapei32_v_u32mf2_m(mask, base, bindex, value, vl);
}
1668 
1669 //
1670 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
1671 // CHECK-RV64-NEXT:  entry:
1672 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1673 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1674 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1675 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64 (verified by the CHECK lines above).
vuint32m1_t test_vamoswapei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
  return vamoswapei32_v_u32m1_m(mask, base, bindex, value, vl);
}
1679 
1680 //
1681 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
1682 // CHECK-RV64-NEXT:  entry:
1683 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1684 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1685 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1686 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64 (verified by the CHECK lines above).
vuint32m2_t test_vamoswapei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
  return vamoswapei32_v_u32m2_m(mask, base, bindex, value, vl);
}
1690 
1691 //
1692 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
1693 // CHECK-RV64-NEXT:  entry:
1694 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1695 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1696 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1697 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64 (verified by the CHECK lines above).
vuint32m4_t test_vamoswapei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
  return vamoswapei32_v_u32m4_m(mask, base, bindex, value, vl);
}
1701 
1702 //
1703 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
1704 // CHECK-RV64-NEXT:  entry:
1705 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
1706 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1707 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
1708 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64 (verified by the CHECK lines above).
vuint32m8_t test_vamoswapei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
  return vamoswapei32_v_u32m8_m(mask, base, bindex, value, vl);
}
1712 
1713 //
1714 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
1715 // CHECK-RV64-NEXT:  entry:
1716 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
1717 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1718 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
1719 //
// Unsigned masked form (64-bit indices wider than the 32-bit data): @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64 (see CHECK above).
vuint32mf2_t test_vamoswapei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
  return vamoswapei64_v_u32mf2_m(mask, base, bindex, value, vl);
}
1723 
1724 //
1725 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
1726 // CHECK-RV64-NEXT:  entry:
1727 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
1728 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1729 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
1730 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64 (verified by the CHECK lines above).
vuint32m1_t test_vamoswapei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
  return vamoswapei64_v_u32m1_m(mask, base, bindex, value, vl);
}
1734 
1735 //
1736 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
1737 // CHECK-RV64-NEXT:  entry:
1738 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
1739 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1740 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
1741 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64 (verified by the CHECK lines above).
vuint32m2_t test_vamoswapei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
  return vamoswapei64_v_u32m2_m(mask, base, bindex, value, vl);
}
1745 
1746 //
1747 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
1748 // CHECK-RV64-NEXT:  entry:
1749 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
1750 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1751 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
1752 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64 (verified by the CHECK lines above).
vuint32m4_t test_vamoswapei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
  return vamoswapei64_v_u32m4_m(mask, base, bindex, value, vl);
}
1756 
1757 //
1758 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
1759 // CHECK-RV64-NEXT:  entry:
1760 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1761 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1762 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1763 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64 (verified by the CHECK lines above).
vuint64m1_t test_vamoswapei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei8_v_u64m1_m(mask, base, bindex, value, vl);
}
1767 
1768 //
1769 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
1770 // CHECK-RV64-NEXT:  entry:
1771 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1772 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1773 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1774 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64 (verified by the CHECK lines above).
vuint64m2_t test_vamoswapei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei8_v_u64m2_m(mask, base, bindex, value, vl);
}
1778 
1779 //
1780 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
1781 // CHECK-RV64-NEXT:  entry:
1782 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1783 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1784 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1785 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64 (verified by the CHECK lines above).
vuint64m4_t test_vamoswapei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei8_v_u64m4_m(mask, base, bindex, value, vl);
}
1789 
1790 //
1791 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
1792 // CHECK-RV64-NEXT:  entry:
1793 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1794 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1795 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1796 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64 (verified by the CHECK lines above).
vuint64m8_t test_vamoswapei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei8_v_u64m8_m(mask, base, bindex, value, vl);
}
1800 
1801 //
1802 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
1803 // CHECK-RV64-NEXT:  entry:
1804 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1805 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1806 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1807 //
// Unsigned masked form: must lower to @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64 (verified by the CHECK lines above).
vuint64m1_t test_vamoswapei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei16_v_u64m1_m(mask, base, bindex, value, vl);
}
1811 
1812 //
1813 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
1814 // CHECK-RV64-NEXT:  entry:
1815 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1816 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1817 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1818 //
// Masked AMO swap: uint64_t elements (LMUL=2), 16-bit indices.
vuint64m2_t test_vamoswapei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei16_v_u64m2_m(mask, base, bindex, value, vl);
}
1822 
1823 //
1824 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
1825 // CHECK-RV64-NEXT:  entry:
1826 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1827 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1828 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1829 //
// Masked AMO swap: uint64_t elements (LMUL=4), 16-bit indices.
vuint64m4_t test_vamoswapei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei16_v_u64m4_m(mask, base, bindex, value, vl);
}
1833 
1834 //
1835 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
1836 // CHECK-RV64-NEXT:  entry:
1837 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1838 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1839 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1840 //
// Masked AMO swap: uint64_t elements (LMUL=8), 16-bit indices.
vuint64m8_t test_vamoswapei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei16_v_u64m8_m(mask, base, bindex, value, vl);
}
1844 
1845 //
1846 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
1847 // CHECK-RV64-NEXT:  entry:
1848 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1849 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1850 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1851 //
// Masked AMO swap: uint64_t elements (LMUL=1), 32-bit indices.
vuint64m1_t test_vamoswapei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei32_v_u64m1_m(mask, base, bindex, value, vl);
}
1855 
1856 //
1857 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
1858 // CHECK-RV64-NEXT:  entry:
1859 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1860 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1861 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1862 //
// Masked AMO swap: uint64_t elements (LMUL=2), 32-bit indices.
vuint64m2_t test_vamoswapei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei32_v_u64m2_m(mask, base, bindex, value, vl);
}
1866 
1867 //
1868 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
1869 // CHECK-RV64-NEXT:  entry:
1870 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1871 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1872 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1873 //
// Masked AMO swap: uint64_t elements (LMUL=4), 32-bit indices.
vuint64m4_t test_vamoswapei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei32_v_u64m4_m(mask, base, bindex, value, vl);
}
1877 
1878 //
1879 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
1880 // CHECK-RV64-NEXT:  entry:
1881 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1882 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1883 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1884 //
// Masked AMO swap: uint64_t elements (LMUL=8), 32-bit indices.
vuint64m8_t test_vamoswapei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei32_v_u64m8_m(mask, base, bindex, value, vl);
}
1888 
1889 //
1890 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
1891 // CHECK-RV64-NEXT:  entry:
1892 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
1893 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1894 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
1895 //
// Masked AMO swap: uint64_t elements (LMUL=1), 64-bit indices.
vuint64m1_t test_vamoswapei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
  return vamoswapei64_v_u64m1_m(mask, base, bindex, value, vl);
}
1899 
1900 //
1901 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
1902 // CHECK-RV64-NEXT:  entry:
1903 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
1904 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1905 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
1906 //
// Masked AMO swap: uint64_t elements (LMUL=2), 64-bit indices.
vuint64m2_t test_vamoswapei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
  return vamoswapei64_v_u64m2_m(mask, base, bindex, value, vl);
}
1910 
1911 //
1912 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
1913 // CHECK-RV64-NEXT:  entry:
1914 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
1915 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1916 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
1917 //
// Masked AMO swap: uint64_t elements (LMUL=4), 64-bit indices.
vuint64m4_t test_vamoswapei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
  return vamoswapei64_v_u64m4_m(mask, base, bindex, value, vl);
}
1921 
1922 //
1923 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
1924 // CHECK-RV64-NEXT:  entry:
1925 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
1926 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1927 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
1928 //
// Masked AMO swap: uint64_t elements (LMUL=8), 64-bit indices.
vuint64m8_t test_vamoswapei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
  return vamoswapei64_v_u64m8_m(mask, base, bindex, value, vl);
}
1932 
1933 //
1934 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
1935 // CHECK-RV64-NEXT:  entry:
1936 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
1937 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1938 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
1939 //
// Masked AMO swap: float elements (LMUL=1/2), 8-bit indices.
vfloat32mf2_t test_vamoswapei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei8_v_f32mf2_m(mask, base, bindex, value, vl);
}
1943 
1944 //
1945 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
1946 // CHECK-RV64-NEXT:  entry:
1947 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
1948 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1949 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
1950 //
// Masked AMO swap: float elements (LMUL=1), 8-bit indices.
vfloat32m1_t test_vamoswapei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei8_v_f32m1_m(mask, base, bindex, value, vl);
}
1954 
1955 //
1956 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
1957 // CHECK-RV64-NEXT:  entry:
1958 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
1959 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1960 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
1961 //
// Masked AMO swap: float elements (LMUL=2), 8-bit indices.
vfloat32m2_t test_vamoswapei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei8_v_f32m2_m(mask, base, bindex, value, vl);
}
1965 
1966 //
1967 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
1968 // CHECK-RV64-NEXT:  entry:
1969 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
1970 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1971 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
1972 //
// Masked AMO swap: float elements (LMUL=4), 8-bit indices.
vfloat32m4_t test_vamoswapei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei8_v_f32m4_m(mask, base, bindex, value, vl);
}
1976 
1977 //
1978 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
1979 // CHECK-RV64-NEXT:  entry:
1980 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
1981 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1982 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
1983 //
// Masked AMO swap: float elements (LMUL=8), 8-bit indices.
vfloat32m8_t test_vamoswapei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
  return vamoswapei8_v_f32m8_m(mask, base, bindex, value, vl);
}
1987 
1988 //
1989 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
1990 // CHECK-RV64-NEXT:  entry:
1991 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
1992 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1993 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
1994 //
// Masked AMO swap: float elements (LMUL=1/2), 16-bit indices.
vfloat32mf2_t test_vamoswapei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei16_v_f32mf2_m(mask, base, bindex, value, vl);
}
1998 
1999 //
2000 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
2001 // CHECK-RV64-NEXT:  entry:
2002 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
2003 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2004 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
2005 //
// Masked AMO swap: float elements (LMUL=1), 16-bit indices.
vfloat32m1_t test_vamoswapei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei16_v_f32m1_m(mask, base, bindex, value, vl);
}
2009 
2010 //
2011 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
2012 // CHECK-RV64-NEXT:  entry:
2013 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
2014 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2015 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
2016 //
// Masked AMO swap: float elements (LMUL=2), 16-bit indices.
vfloat32m2_t test_vamoswapei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei16_v_f32m2_m(mask, base, bindex, value, vl);
}
2020 
2021 //
2022 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
2023 // CHECK-RV64-NEXT:  entry:
2024 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
2025 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2026 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
2027 //
// Masked AMO swap: float elements (LMUL=4), 16-bit indices.
vfloat32m4_t test_vamoswapei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei16_v_f32m4_m(mask, base, bindex, value, vl);
}
2031 
2032 //
2033 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
2034 // CHECK-RV64-NEXT:  entry:
2035 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
2036 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2037 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
2038 //
// Masked AMO swap: float elements (LMUL=8), 16-bit indices.
vfloat32m8_t test_vamoswapei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
  return vamoswapei16_v_f32m8_m(mask, base, bindex, value, vl);
}
2042 
2043 //
2044 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
2045 // CHECK-RV64-NEXT:  entry:
2046 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
2047 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2048 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
2049 //
// Masked AMO swap: float elements (LMUL=1/2), 32-bit indices.
vfloat32mf2_t test_vamoswapei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei32_v_f32mf2_m(mask, base, bindex, value, vl);
}
2053 
2054 //
2055 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
2056 // CHECK-RV64-NEXT:  entry:
2057 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
2058 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2059 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
2060 //
// Masked AMO swap: float elements (LMUL=1), 32-bit indices.
vfloat32m1_t test_vamoswapei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei32_v_f32m1_m(mask, base, bindex, value, vl);
}
2064 
2065 //
2066 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
2067 // CHECK-RV64-NEXT:  entry:
2068 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
2069 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2070 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
2071 //
// Masked AMO swap: float elements (LMUL=2), 32-bit indices.
vfloat32m2_t test_vamoswapei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei32_v_f32m2_m(mask, base, bindex, value, vl);
}
2075 
2076 //
2077 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
2078 // CHECK-RV64-NEXT:  entry:
2079 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
2080 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2081 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
2082 //
// Masked AMO swap: float elements (LMUL=4), 32-bit indices.
vfloat32m4_t test_vamoswapei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei32_v_f32m4_m(mask, base, bindex, value, vl);
}
2086 
2087 //
2088 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
2089 // CHECK-RV64-NEXT:  entry:
2090 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
2091 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2092 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
2093 //
// Masked AMO swap: float elements (LMUL=8), 32-bit indices.
vfloat32m8_t test_vamoswapei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
  return vamoswapei32_v_f32m8_m(mask, base, bindex, value, vl);
}
2097 
2098 //
2099 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
2100 // CHECK-RV64-NEXT:  entry:
2101 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
2102 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2103 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
2104 //
// Masked AMO swap: float elements (LMUL=1/2), 64-bit indices.
vfloat32mf2_t test_vamoswapei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
  return vamoswapei64_v_f32mf2_m(mask, base, bindex, value, vl);
}
2108 
2109 //
2110 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
2111 // CHECK-RV64-NEXT:  entry:
2112 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
2113 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2114 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
2115 //
// Masked AMO swap: float elements (LMUL=1), 64-bit indices.
vfloat32m1_t test_vamoswapei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
  return vamoswapei64_v_f32m1_m(mask, base, bindex, value, vl);
}
2119 
2120 //
2121 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m(
2122 // CHECK-RV64-NEXT:  entry:
2123 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
2124 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2125 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
2126 //
// Masked AMO swap: float elements (LMUL=2), 64-bit indices.
vfloat32m2_t test_vamoswapei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
  return vamoswapei64_v_f32m2_m(mask, base, bindex, value, vl);
}
2130 
2131 //
2132 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m(
2133 // CHECK-RV64-NEXT:  entry:
2134 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
2135 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2136 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
2137 //
// Masked AMO swap: float elements (LMUL=4), 64-bit indices.
vfloat32m4_t test_vamoswapei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
  return vamoswapei64_v_f32m4_m(mask, base, bindex, value, vl);
}
2141 
2142 //
2143 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m(
2144 // CHECK-RV64-NEXT:  entry:
2145 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
2146 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2147 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
2148 //
// Masked AMO swap: double elements (LMUL=1), 8-bit indices.
vfloat64m1_t test_vamoswapei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
  return vamoswapei8_v_f64m1_m(mask, base, bindex, value, vl);
}
2152 
2153 //
2154 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m(
2155 // CHECK-RV64-NEXT:  entry:
2156 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
2157 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2158 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
2159 //
// Masked AMO swap: double elements (LMUL=2), 8-bit indices.
vfloat64m2_t test_vamoswapei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
  return vamoswapei8_v_f64m2_m(mask, base, bindex, value, vl);
}
2163 
2164 //
2165 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m(
2166 // CHECK-RV64-NEXT:  entry:
2167 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
2168 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2169 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
2170 //
// Masked AMO swap: double elements (LMUL=4), 8-bit indices.
vfloat64m4_t test_vamoswapei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
  return vamoswapei8_v_f64m4_m(mask, base, bindex, value, vl);
}
2174 
2175 //
2176 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m(
2177 // CHECK-RV64-NEXT:  entry:
2178 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
2179 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2180 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
2181 //
// Masked AMO swap: double elements (LMUL=8), 8-bit indices.
vfloat64m8_t test_vamoswapei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
  return vamoswapei8_v_f64m8_m(mask, base, bindex, value, vl);
}
2185 
2186 //
2187 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m(
2188 // CHECK-RV64-NEXT:  entry:
2189 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
2190 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2191 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
2192 //
// Masked AMO swap: double elements (LMUL=1), 16-bit indices.
vfloat64m1_t test_vamoswapei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
  return vamoswapei16_v_f64m1_m(mask, base, bindex, value, vl);
}
2196 
2197 //
2198 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m(
2199 // CHECK-RV64-NEXT:  entry:
2200 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
2201 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2202 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
2203 //
// Masked AMO swap: double elements (LMUL=2), 16-bit indices.
vfloat64m2_t test_vamoswapei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
  return vamoswapei16_v_f64m2_m(mask, base, bindex, value, vl);
}
2207 
2208 //
2209 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m(
2210 // CHECK-RV64-NEXT:  entry:
2211 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
2212 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2213 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
2214 //
// Masked AMO swap: double elements (LMUL=4), 16-bit indices.
vfloat64m4_t test_vamoswapei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
  return vamoswapei16_v_f64m4_m(mask, base, bindex, value, vl);
}
2218 
2219 //
2220 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m(
2221 // CHECK-RV64-NEXT:  entry:
2222 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
2223 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2224 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
2225 //
// Masked AMO swap: double elements (LMUL=8), 16-bit indices.
vfloat64m8_t test_vamoswapei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
  return vamoswapei16_v_f64m8_m(mask, base, bindex, value, vl);
}
2229 
2230 //
2231 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m(
2232 // CHECK-RV64-NEXT:  entry:
2233 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
2234 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2235 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
2236 //
// Masked vamoswap, 32-bit indices (vuint32mf2_t), f64m1 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64.
vfloat64m1_t test_vamoswapei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei32_v_f64m1_m(mask, base, bindex, value, vl);
}
2240 
2241 //
2242 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m(
2243 // CHECK-RV64-NEXT:  entry:
2244 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
2245 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2246 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
2247 //
// Masked vamoswap, 32-bit indices (vuint32m1_t), f64m2 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64.
vfloat64m2_t test_vamoswapei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei32_v_f64m2_m(mask, base, bindex, value, vl);
}
2251 
2252 //
2253 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m(
2254 // CHECK-RV64-NEXT:  entry:
2255 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
2256 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2257 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
2258 //
// Masked vamoswap, 32-bit indices (vuint32m2_t), f64m4 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64.
vfloat64m4_t test_vamoswapei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei32_v_f64m4_m(mask, base, bindex, value, vl);
}
2262 
2263 //
2264 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m(
2265 // CHECK-RV64-NEXT:  entry:
2266 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
2267 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2268 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
2269 //
// Masked vamoswap, 32-bit indices (vuint32m4_t), f64m8 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64.
vfloat64m8_t test_vamoswapei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei32_v_f64m8_m(mask, base, bindex, value, vl);
}
2273 
2274 //
2275 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m(
2276 // CHECK-RV64-NEXT:  entry:
2277 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
2278 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2279 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
2280 //
// Masked vamoswap, 64-bit indices (vuint64m1_t), f64m1 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64.
vfloat64m1_t test_vamoswapei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei64_v_f64m1_m(mask, base, bindex, value, vl);
}
2284 
2285 //
2286 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m(
2287 // CHECK-RV64-NEXT:  entry:
2288 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
2289 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2290 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
2291 //
// Masked vamoswap, 64-bit indices (vuint64m2_t), f64m2 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64.
vfloat64m2_t test_vamoswapei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei64_v_f64m2_m(mask, base, bindex, value, vl);
}
2295 
2296 //
2297 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
2298 // CHECK-RV64-NEXT:  entry:
2299 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
2300 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2301 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
2302 //
// Masked vamoswap, 64-bit indices (vuint64m4_t), f64m4 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64.
vfloat64m4_t test_vamoswapei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei64_v_f64m4_m(mask, base, bindex, value, vl);
}
2306 
2307 //
2308 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
2309 // CHECK-RV64-NEXT:  entry:
2310 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
2311 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2312 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
2313 //
// Masked vamoswap, 64-bit indices (vuint64m8_t), f64m8 data: the CHECK lines
// above pin the lowering to @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64.
vfloat64m8_t test_vamoswapei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
  // Thin wrapper: just forwards to the intrinsic so codegen can be checked.
  return vamoswapei64_v_f64m8_m(mask, base, bindex, value, vl);
}
2317 
2318