1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 2; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ 3; RUN: < %s | FileCheck %s 4declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64( 5 <vscale x 1 x i32>*, 6 <vscale x 1 x i64>, 7 <vscale x 1 x i32>, 8 i32); 9 10define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind { 11; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i64: 12; CHECK: # %bb.0: # %entry 13; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 14; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9 15; CHECK-NEXT: vmv1r.v v8, v9 16; CHECK-NEXT: ret 17entry: 18 %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64( 19 <vscale x 1 x i32> *%0, 20 <vscale x 1 x i64> %1, 21 <vscale x 1 x i32> %2, 22 i32 %3) 23 24 ret <vscale x 1 x i32> %a 25} 26 27declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64( 28 <vscale x 1 x i32>*, 29 <vscale x 1 x i64>, 30 <vscale x 1 x i32>, 31 <vscale x 1 x i1>, 32 i32); 33 34define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 35; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64: 36; CHECK: # %bb.0: # %entry 37; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 38; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t 39; CHECK-NEXT: vmv1r.v v8, v9 40; CHECK-NEXT: ret 41entry: 42 %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64( 43 <vscale x 1 x i32> *%0, 44 <vscale x 1 x i64> %1, 45 <vscale x 1 x i32> %2, 46 <vscale x 1 x i1> %3, 47 i32 %4) 48 49 ret <vscale x 1 x i32> %a 50} 51 52declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64( 53 <vscale x 2 x i32>*, 54 <vscale x 2 x i64>, 55 <vscale x 2 x i32>, 56 i32); 57 58define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { 59; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i64: 60; CHECK: # %bb.0: # %entry 61; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 62; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10 63; CHECK-NEXT: vmv1r.v v8, v10 64; CHECK-NEXT: ret 65entry: 66 %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64( 67 <vscale x 2 x i32> *%0, 68 <vscale x 2 x i64> %1, 69 <vscale x 2 x i32> %2, 70 i32 %3) 71 72 ret <vscale x 2 x i32> %a 73} 74 75declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64( 76 <vscale x 2 x i32>*, 77 <vscale x 2 x i64>, 78 <vscale x 2 x i32>, 79 <vscale x 2 x i1>, 80 i32); 81 82define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 83; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64: 84; CHECK: # %bb.0: # %entry 85; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 86; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t 87; CHECK-NEXT: vmv1r.v v8, v10 88; CHECK-NEXT: ret 89entry: 90 %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64( 91 <vscale x 2 x i32> *%0, 92 <vscale x 2 x i64> %1, 93 <vscale x 2 x i32> %2, 94 <vscale x 2 x i1> %3, 95 i32 %4) 96 97 ret <vscale x 2 x i32> %a 98} 99 100declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64( 101 <vscale x 4 x i32>*, 102 <vscale x 4 x i64>, 103 <vscale x 4 x i32>, 104 i32); 105 106define <vscale x 4 
x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind { 107; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i64: 108; CHECK: # %bb.0: # %entry 109; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 110; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12 111; CHECK-NEXT: vmv2r.v v8, v12 112; CHECK-NEXT: ret 113entry: 114 %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64( 115 <vscale x 4 x i32> *%0, 116 <vscale x 4 x i64> %1, 117 <vscale x 4 x i32> %2, 118 i32 %3) 119 120 ret <vscale x 4 x i32> %a 121} 122 123declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64( 124 <vscale x 4 x i32>*, 125 <vscale x 4 x i64>, 126 <vscale x 4 x i32>, 127 <vscale x 4 x i1>, 128 i32); 129 130define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 131; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64: 132; CHECK: # %bb.0: # %entry 133; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 134; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t 135; CHECK-NEXT: vmv2r.v v8, v12 136; CHECK-NEXT: ret 137entry: 138 %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64( 139 <vscale x 4 x i32> *%0, 140 <vscale x 4 x i64> %1, 141 <vscale x 4 x i32> %2, 142 <vscale x 4 x i1> %3, 143 i32 %4) 144 145 ret <vscale x 4 x i32> %a 146} 147 148declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64( 149 <vscale x 8 x i32>*, 150 <vscale x 8 x i64>, 151 <vscale x 8 x i32>, 152 i32); 153 154define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind { 155; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i64: 156; CHECK: # %bb.0: # %entry 157; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 158; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16 159; CHECK-NEXT: vmv4r.v v8, v16 160; CHECK-NEXT: ret 161entry: 162 %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64( 163 <vscale x 8 x i32> *%0, 164 <vscale x 8 x i64> %1, 165 <vscale x 8 x i32> %2, 166 i32 %3) 167 168 ret <vscale x 8 x i32> %a 169} 170 171declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64( 172 <vscale x 8 x i32>*, 173 <vscale x 8 x i64>, 174 <vscale x 8 x i32>, 175 <vscale x 8 x i1>, 176 i32); 177 178define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { 179; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64: 180; CHECK: # %bb.0: # %entry 181; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 182; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t 183; CHECK-NEXT: vmv4r.v v8, v16 184; CHECK-NEXT: ret 185entry: 186 %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64( 187 <vscale x 8 x i32> *%0, 188 <vscale x 8 x i64> %1, 189 <vscale x 8 x i32> %2, 190 <vscale x 8 x i1> %3, 191 i32 %4) 192 193 ret <vscale x 8 x i32> %a 194} 195 196declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64( 197 <vscale x 1 x i64>*, 198 <vscale x 1 x i64>, 199 <vscale x 1 x i64>, 200 i32); 201 202define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { 203; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i64: 204; CHECK: # %bb.0: # %entry 205; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 206; 
CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9 207; CHECK-NEXT: vmv1r.v v8, v9 208; CHECK-NEXT: ret 209entry: 210 %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64( 211 <vscale x 1 x i64> *%0, 212 <vscale x 1 x i64> %1, 213 <vscale x 1 x i64> %2, 214 i32 %3) 215 216 ret <vscale x 1 x i64> %a 217} 218 219declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64( 220 <vscale x 1 x i64>*, 221 <vscale x 1 x i64>, 222 <vscale x 1 x i64>, 223 <vscale x 1 x i1>, 224 i32); 225 226define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 227; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64: 228; CHECK: # %bb.0: # %entry 229; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 230; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t 231; CHECK-NEXT: vmv1r.v v8, v9 232; CHECK-NEXT: ret 233entry: 234 %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64( 235 <vscale x 1 x i64> *%0, 236 <vscale x 1 x i64> %1, 237 <vscale x 1 x i64> %2, 238 <vscale x 1 x i1> %3, 239 i32 %4) 240 241 ret <vscale x 1 x i64> %a 242} 243 244declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64( 245 <vscale x 2 x i64>*, 246 <vscale x 2 x i64>, 247 <vscale x 2 x i64>, 248 i32); 249 250define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind { 251; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i64: 252; CHECK: # %bb.0: # %entry 253; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 254; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10 255; CHECK-NEXT: vmv2r.v v8, v10 256; CHECK-NEXT: ret 257entry: 258 %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64( 259 <vscale x 2 x i64> *%0, 260 <vscale x 2 x i64> %1, 261 <vscale x 2 x i64> %2, 262 i32 %3) 263 264 ret <vscale x 2 x i64> %a 265} 266 267declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64( 268 <vscale x 2 x i64>*, 269 <vscale x 2 x i64>, 270 <vscale x 2 x i64>, 271 <vscale x 2 x i1>, 272 i32); 273 274define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 275; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64: 276; CHECK: # %bb.0: # %entry 277; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 278; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t 279; CHECK-NEXT: vmv2r.v v8, v10 280; CHECK-NEXT: ret 281entry: 282 %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64( 283 <vscale x 2 x i64> *%0, 284 <vscale x 2 x i64> %1, 285 <vscale x 2 x i64> %2, 286 <vscale x 2 x i1> %3, 287 i32 %4) 288 289 ret <vscale x 2 x i64> %a 290} 291 292declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64( 293 <vscale x 4 x i64>*, 294 <vscale x 4 x i64>, 295 <vscale x 4 x i64>, 296 i32); 297 298define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind { 299; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i64: 300; CHECK: # %bb.0: # %entry 301; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 302; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12 303; CHECK-NEXT: vmv4r.v v8, v12 304; CHECK-NEXT: ret 305entry: 306 %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64( 307 <vscale x 4 x i64> *%0, 308 <vscale x 4 x i64> %1, 309 <vscale x 4 x i64> %2, 310 i32 %3) 311 312 
ret <vscale x 4 x i64> %a 313} 314 315declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64( 316 <vscale x 4 x i64>*, 317 <vscale x 4 x i64>, 318 <vscale x 4 x i64>, 319 <vscale x 4 x i1>, 320 i32); 321 322define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 323; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64: 324; CHECK: # %bb.0: # %entry 325; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 326; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t 327; CHECK-NEXT: vmv4r.v v8, v12 328; CHECK-NEXT: ret 329entry: 330 %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64( 331 <vscale x 4 x i64> *%0, 332 <vscale x 4 x i64> %1, 333 <vscale x 4 x i64> %2, 334 <vscale x 4 x i1> %3, 335 i32 %4) 336 337 ret <vscale x 4 x i64> %a 338} 339 340declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64( 341 <vscale x 8 x i64>*, 342 <vscale x 8 x i64>, 343 <vscale x 8 x i64>, 344 i32); 345 346define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind { 347; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i64: 348; CHECK: # %bb.0: # %entry 349; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 350; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16 351; CHECK-NEXT: vmv8r.v v8, v16 352; CHECK-NEXT: ret 353entry: 354 %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64( 355 <vscale x 8 x i64> *%0, 356 <vscale x 8 x i64> %1, 357 <vscale x 8 x i64> %2, 358 i32 %3) 359 360 ret <vscale x 8 x i64> %a 361} 362 363declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64( 364 <vscale x 8 x i64>*, 365 <vscale x 8 x i64>, 366 <vscale x 8 x i64>, 367 <vscale x 8 x i1>, 368 i32); 369 370define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { 371; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64: 372; CHECK: # %bb.0: # %entry 373; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 374; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t 375; CHECK-NEXT: vmv8r.v v8, v16 376; CHECK-NEXT: ret 377entry: 378 %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64( 379 <vscale x 8 x i64> *%0, 380 <vscale x 8 x i64> %1, 381 <vscale x 8 x i64> %2, 382 <vscale x 8 x i1> %3, 383 i32 %4) 384 385 ret <vscale x 8 x i64> %a 386} 387 388declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64( 389 <vscale x 1 x float>*, 390 <vscale x 1 x i64>, 391 <vscale x 1 x float>, 392 i32); 393 394define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, i32 %3) nounwind { 395; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i64: 396; CHECK: # %bb.0: # %entry 397; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 398; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9 399; CHECK-NEXT: vmv1r.v v8, v9 400; CHECK-NEXT: ret 401entry: 402 %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64( 403 <vscale x 1 x float> *%0, 404 <vscale x 1 x i64> %1, 405 <vscale x 1 x float> %2, 406 i32 %3) 407 408 ret <vscale x 1 x float> %a 409} 410 411declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64( 412 <vscale x 1 x float>*, 413 <vscale x 1 x i64>, 414 <vscale x 1 x float>, 415 <vscale x 1 x i1>, 416 i32); 417 418define <vscale x 1 x float> 
@intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 419; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64: 420; CHECK: # %bb.0: # %entry 421; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 422; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t 423; CHECK-NEXT: vmv1r.v v8, v9 424; CHECK-NEXT: ret 425entry: 426 %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64( 427 <vscale x 1 x float> *%0, 428 <vscale x 1 x i64> %1, 429 <vscale x 1 x float> %2, 430 <vscale x 1 x i1> %3, 431 i32 %4) 432 433 ret <vscale x 1 x float> %a 434} 435 436declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64( 437 <vscale x 2 x float>*, 438 <vscale x 2 x i64>, 439 <vscale x 2 x float>, 440 i32); 441 442define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, i32 %3) nounwind { 443; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i64: 444; CHECK: # %bb.0: # %entry 445; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 446; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10 447; CHECK-NEXT: vmv1r.v v8, v10 448; CHECK-NEXT: ret 449entry: 450 %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64( 451 <vscale x 2 x float> *%0, 452 <vscale x 2 x i64> %1, 453 <vscale x 2 x float> %2, 454 i32 %3) 455 456 ret <vscale x 2 x float> %a 457} 458 459declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64( 460 <vscale x 2 x float>*, 461 <vscale x 2 x i64>, 462 <vscale x 2 x float>, 463 <vscale x 2 x i1>, 464 i32); 465 466define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 467; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64: 468; CHECK: # %bb.0: # %entry 469; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 470; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t 471; CHECK-NEXT: vmv1r.v v8, v10 472; CHECK-NEXT: ret 473entry: 474 %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64( 475 <vscale x 2 x float> *%0, 476 <vscale x 2 x i64> %1, 477 <vscale x 2 x float> %2, 478 <vscale x 2 x i1> %3, 479 i32 %4) 480 481 ret <vscale x 2 x float> %a 482} 483 484declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64( 485 <vscale x 4 x float>*, 486 <vscale x 4 x i64>, 487 <vscale x 4 x float>, 488 i32); 489 490define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, i32 %3) nounwind { 491; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i64: 492; CHECK: # %bb.0: # %entry 493; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 494; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12 495; CHECK-NEXT: vmv2r.v v8, v12 496; CHECK-NEXT: ret 497entry: 498 %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64( 499 <vscale x 4 x float> *%0, 500 <vscale x 4 x i64> %1, 501 <vscale x 4 x float> %2, 502 i32 %3) 503 504 ret <vscale x 4 x float> %a 505} 506 507declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64( 508 <vscale x 4 x float>*, 509 <vscale x 4 x i64>, 510 <vscale x 4 x float>, 511 <vscale x 4 x i1>, 512 i32); 513 514define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 515; CHECK-LABEL: 
intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64: 516; CHECK: # %bb.0: # %entry 517; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 518; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t 519; CHECK-NEXT: vmv2r.v v8, v12 520; CHECK-NEXT: ret 521entry: 522 %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64( 523 <vscale x 4 x float> *%0, 524 <vscale x 4 x i64> %1, 525 <vscale x 4 x float> %2, 526 <vscale x 4 x i1> %3, 527 i32 %4) 528 529 ret <vscale x 4 x float> %a 530} 531 532declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64( 533 <vscale x 8 x float>*, 534 <vscale x 8 x i64>, 535 <vscale x 8 x float>, 536 i32); 537 538define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, i32 %3) nounwind { 539; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i64: 540; CHECK: # %bb.0: # %entry 541; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 542; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16 543; CHECK-NEXT: vmv4r.v v8, v16 544; CHECK-NEXT: ret 545entry: 546 %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64( 547 <vscale x 8 x float> *%0, 548 <vscale x 8 x i64> %1, 549 <vscale x 8 x float> %2, 550 i32 %3) 551 552 ret <vscale x 8 x float> %a 553} 554 555declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64( 556 <vscale x 8 x float>*, 557 <vscale x 8 x i64>, 558 <vscale x 8 x float>, 559 <vscale x 8 x i1>, 560 i32); 561 562define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { 563; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64: 564; CHECK: # %bb.0: # %entry 565; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 566; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t 567; CHECK-NEXT: vmv4r.v v8, v16 568; CHECK-NEXT: ret 569entry: 570 %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64( 571 <vscale x 8 x float> *%0, 572 <vscale x 8 x i64> %1, 573 <vscale x 8 x float> %2, 574 <vscale x 8 x i1> %3, 575 i32 %4) 576 577 ret <vscale x 8 x float> %a 578} 579 580declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64( 581 <vscale x 1 x double>*, 582 <vscale x 1 x i64>, 583 <vscale x 1 x double>, 584 i32); 585 586define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, i32 %3) nounwind { 587; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i64: 588; CHECK: # %bb.0: # %entry 589; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 590; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9 591; CHECK-NEXT: vmv1r.v v8, v9 592; CHECK-NEXT: ret 593entry: 594 %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64( 595 <vscale x 1 x double> *%0, 596 <vscale x 1 x i64> %1, 597 <vscale x 1 x double> %2, 598 i32 %3) 599 600 ret <vscale x 1 x double> %a 601} 602 603declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64( 604 <vscale x 1 x double>*, 605 <vscale x 1 x i64>, 606 <vscale x 1 x double>, 607 <vscale x 1 x i1>, 608 i32); 609 610define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 611; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64: 612; CHECK: # %bb.0: # %entry 613; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 614; CHECK-NEXT: vamoswapei64.v v9, (a0), v8, v9, v0.t 615; 
CHECK-NEXT: vmv1r.v v8, v9 616; CHECK-NEXT: ret 617entry: 618 %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64( 619 <vscale x 1 x double> *%0, 620 <vscale x 1 x i64> %1, 621 <vscale x 1 x double> %2, 622 <vscale x 1 x i1> %3, 623 i32 %4) 624 625 ret <vscale x 1 x double> %a 626} 627 628declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64( 629 <vscale x 2 x double>*, 630 <vscale x 2 x i64>, 631 <vscale x 2 x double>, 632 i32); 633 634define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, i32 %3) nounwind { 635; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i64: 636; CHECK: # %bb.0: # %entry 637; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 638; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10 639; CHECK-NEXT: vmv2r.v v8, v10 640; CHECK-NEXT: ret 641entry: 642 %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64( 643 <vscale x 2 x double> *%0, 644 <vscale x 2 x i64> %1, 645 <vscale x 2 x double> %2, 646 i32 %3) 647 648 ret <vscale x 2 x double> %a 649} 650 651declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64( 652 <vscale x 2 x double>*, 653 <vscale x 2 x i64>, 654 <vscale x 2 x double>, 655 <vscale x 2 x i1>, 656 i32); 657 658define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 659; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64: 660; CHECK: # %bb.0: # %entry 661; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 662; CHECK-NEXT: vamoswapei64.v v10, (a0), v8, v10, v0.t 663; CHECK-NEXT: vmv2r.v v8, v10 664; CHECK-NEXT: ret 665entry: 666 %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64( 667 <vscale x 2 x double> *%0, 668 <vscale x 2 x i64> %1, 669 <vscale x 2 x double> %2, 670 <vscale x 2 x i1> %3, 671 i32 %4) 672 673 ret <vscale x 2 x double> %a 674} 675 676declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64( 677 <vscale x 4 x double>*, 678 <vscale x 4 x i64>, 679 <vscale x 4 x double>, 680 i32); 681 682define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, i32 %3) nounwind { 683; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i64: 684; CHECK: # %bb.0: # %entry 685; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 686; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12 687; CHECK-NEXT: vmv4r.v v8, v12 688; CHECK-NEXT: ret 689entry: 690 %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64( 691 <vscale x 4 x double> *%0, 692 <vscale x 4 x i64> %1, 693 <vscale x 4 x double> %2, 694 i32 %3) 695 696 ret <vscale x 4 x double> %a 697} 698 699declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64( 700 <vscale x 4 x double>*, 701 <vscale x 4 x i64>, 702 <vscale x 4 x double>, 703 <vscale x 4 x i1>, 704 i32); 705 706define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 707; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64: 708; CHECK: # %bb.0: # %entry 709; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 710; CHECK-NEXT: vamoswapei64.v v12, (a0), v8, v12, v0.t 711; CHECK-NEXT: vmv4r.v v8, v12 712; CHECK-NEXT: ret 713entry: 714 %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64( 715 <vscale x 4 x 
double> *%0, 716 <vscale x 4 x i64> %1, 717 <vscale x 4 x double> %2, 718 <vscale x 4 x i1> %3, 719 i32 %4) 720 721 ret <vscale x 4 x double> %a 722} 723 724declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64( 725 <vscale x 8 x double>*, 726 <vscale x 8 x i64>, 727 <vscale x 8 x double>, 728 i32); 729 730define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, i32 %3) nounwind { 731; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i64: 732; CHECK: # %bb.0: # %entry 733; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 734; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16 735; CHECK-NEXT: vmv8r.v v8, v16 736; CHECK-NEXT: ret 737entry: 738 %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64( 739 <vscale x 8 x double> *%0, 740 <vscale x 8 x i64> %1, 741 <vscale x 8 x double> %2, 742 i32 %3) 743 744 ret <vscale x 8 x double> %a 745} 746 747declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64( 748 <vscale x 8 x double>*, 749 <vscale x 8 x i64>, 750 <vscale x 8 x double>, 751 <vscale x 8 x i1>, 752 i32); 753 754define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { 755; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64: 756; CHECK: # %bb.0: # %entry 757; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 758; CHECK-NEXT: vamoswapei64.v v16, (a0), v8, v16, v0.t 759; CHECK-NEXT: vmv8r.v v8, v16 760; CHECK-NEXT: ret 761entry: 762 %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64( 763 <vscale x 8 x double> *%0, 764 <vscale x 8 x i64> %1, 765 <vscale x 8 x double> %2, 766 <vscale x 8 x i1> %3, 767 i32 %4) 768 769 ret <vscale x 8 x double> %a 770} 771 772declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32( 773 <vscale x 1 x i32>*, 774 <vscale x 1 x i32>, 775 <vscale x 1 x i32>, 776 i32); 777 778define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind { 779; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i32: 780; CHECK: # %bb.0: # %entry 781; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 782; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9 783; CHECK-NEXT: vmv1r.v v8, v9 784; CHECK-NEXT: ret 785entry: 786 %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32( 787 <vscale x 1 x i32> *%0, 788 <vscale x 1 x i32> %1, 789 <vscale x 1 x i32> %2, 790 i32 %3) 791 792 ret <vscale x 1 x i32> %a 793} 794 795declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32( 796 <vscale x 1 x i32>*, 797 <vscale x 1 x i32>, 798 <vscale x 1 x i32>, 799 <vscale x 1 x i1>, 800 i32); 801 802define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 803; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32: 804; CHECK: # %bb.0: # %entry 805; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 806; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9, v0.t 807; CHECK-NEXT: vmv1r.v v8, v9 808; CHECK-NEXT: ret 809entry: 810 %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32( 811 <vscale x 1 x i32> *%0, 812 <vscale x 1 x i32> %1, 813 <vscale x 1 x i32> %2, 814 <vscale x 1 x i1> %3, 815 i32 %4) 816 817 ret <vscale x 1 x i32> %a 818} 819 820declare <vscale x 2 x i32> 
@llvm.riscv.vamoswap.nxv2i32.nxv2i32( 821 <vscale x 2 x i32>*, 822 <vscale x 2 x i32>, 823 <vscale x 2 x i32>, 824 i32); 825 826define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { 827; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i32: 828; CHECK: # %bb.0: # %entry 829; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 830; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9 831; CHECK-NEXT: vmv1r.v v8, v9 832; CHECK-NEXT: ret 833entry: 834 %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32( 835 <vscale x 2 x i32> *%0, 836 <vscale x 2 x i32> %1, 837 <vscale x 2 x i32> %2, 838 i32 %3) 839 840 ret <vscale x 2 x i32> %a 841} 842 843declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32( 844 <vscale x 2 x i32>*, 845 <vscale x 2 x i32>, 846 <vscale x 2 x i32>, 847 <vscale x 2 x i1>, 848 i32); 849 850define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 851; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32: 852; CHECK: # %bb.0: # %entry 853; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 854; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9, v0.t 855; CHECK-NEXT: vmv1r.v v8, v9 856; CHECK-NEXT: ret 857entry: 858 %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32( 859 <vscale x 2 x i32> *%0, 860 <vscale x 2 x i32> %1, 861 <vscale x 2 x i32> %2, 862 <vscale x 2 x i1> %3, 863 i32 %4) 864 865 ret <vscale x 2 x i32> %a 866} 867 868declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32( 869 <vscale x 4 x i32>*, 870 <vscale x 4 x i32>, 871 <vscale x 4 x i32>, 872 i32); 873 874define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind { 875; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i32: 876; CHECK: # %bb.0: # %entry 877; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 878; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10 879; CHECK-NEXT: vmv2r.v v8, v10 880; CHECK-NEXT: ret 881entry: 882 %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32( 883 <vscale x 4 x i32> *%0, 884 <vscale x 4 x i32> %1, 885 <vscale x 4 x i32> %2, 886 i32 %3) 887 888 ret <vscale x 4 x i32> %a 889} 890 891declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32( 892 <vscale x 4 x i32>*, 893 <vscale x 4 x i32>, 894 <vscale x 4 x i32>, 895 <vscale x 4 x i1>, 896 i32); 897 898define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 899; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32: 900; CHECK: # %bb.0: # %entry 901; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 902; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10, v0.t 903; CHECK-NEXT: vmv2r.v v8, v10 904; CHECK-NEXT: ret 905entry: 906 %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32( 907 <vscale x 4 x i32> *%0, 908 <vscale x 4 x i32> %1, 909 <vscale x 4 x i32> %2, 910 <vscale x 4 x i1> %3, 911 i32 %4) 912 913 ret <vscale x 4 x i32> %a 914} 915 916declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32( 917 <vscale x 8 x i32>*, 918 <vscale x 8 x i32>, 919 <vscale x 8 x i32>, 920 i32); 921 922define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind { 
923; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i32: 924; CHECK: # %bb.0: # %entry 925; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 926; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12 927; CHECK-NEXT: vmv4r.v v8, v12 928; CHECK-NEXT: ret 929entry: 930 %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32( 931 <vscale x 8 x i32> *%0, 932 <vscale x 8 x i32> %1, 933 <vscale x 8 x i32> %2, 934 i32 %3) 935 936 ret <vscale x 8 x i32> %a 937} 938 939declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32( 940 <vscale x 8 x i32>*, 941 <vscale x 8 x i32>, 942 <vscale x 8 x i32>, 943 <vscale x 8 x i1>, 944 i32); 945 946define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { 947; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32: 948; CHECK: # %bb.0: # %entry 949; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 950; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12, v0.t 951; CHECK-NEXT: vmv4r.v v8, v12 952; CHECK-NEXT: ret 953entry: 954 %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32( 955 <vscale x 8 x i32> *%0, 956 <vscale x 8 x i32> %1, 957 <vscale x 8 x i32> %2, 958 <vscale x 8 x i1> %3, 959 i32 %4) 960 961 ret <vscale x 8 x i32> %a 962} 963 964declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32( 965 <vscale x 16 x i32>*, 966 <vscale x 16 x i32>, 967 <vscale x 16 x i32>, 968 i32); 969 970define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind { 971; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i32: 972; CHECK: # %bb.0: # %entry 973; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu 974; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16 975; CHECK-NEXT: vmv8r.v v8, v16 976; CHECK-NEXT: ret 977entry: 978 %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32( 979 <vscale x 16 x i32> *%0, 980 <vscale x 16 x i32> %1, 981 <vscale x 16 x i32> %2, 982 i32 %3) 983 984 ret <vscale x 16 x i32> %a 985} 986 987declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32( 988 <vscale x 16 x i32>*, 989 <vscale x 16 x i32>, 990 <vscale x 16 x i32>, 991 <vscale x 16 x i1>, 992 i32); 993 994define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { 995; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32: 996; CHECK: # %bb.0: # %entry 997; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu 998; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16, v0.t 999; CHECK-NEXT: vmv8r.v v8, v16 1000; CHECK-NEXT: ret 1001entry: 1002 %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32( 1003 <vscale x 16 x i32> *%0, 1004 <vscale x 16 x i32> %1, 1005 <vscale x 16 x i32> %2, 1006 <vscale x 16 x i1> %3, 1007 i32 %4) 1008 1009 ret <vscale x 16 x i32> %a 1010} 1011 1012declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32( 1013 <vscale x 1 x i64>*, 1014 <vscale x 1 x i32>, 1015 <vscale x 1 x i64>, 1016 i32); 1017 1018define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { 1019; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i32: 1020; CHECK: # %bb.0: # %entry 1021; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 1022; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9 1023; CHECK-NEXT: 
vmv1r.v v8, v9 1024; CHECK-NEXT: ret 1025entry: 1026 %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32( 1027 <vscale x 1 x i64> *%0, 1028 <vscale x 1 x i32> %1, 1029 <vscale x 1 x i64> %2, 1030 i32 %3) 1031 1032 ret <vscale x 1 x i64> %a 1033} 1034 1035declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32( 1036 <vscale x 1 x i64>*, 1037 <vscale x 1 x i32>, 1038 <vscale x 1 x i64>, 1039 <vscale x 1 x i1>, 1040 i32); 1041 1042define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 1043; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32: 1044; CHECK: # %bb.0: # %entry 1045; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 1046; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9, v0.t 1047; CHECK-NEXT: vmv1r.v v8, v9 1048; CHECK-NEXT: ret 1049entry: 1050 %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32( 1051 <vscale x 1 x i64> *%0, 1052 <vscale x 1 x i32> %1, 1053 <vscale x 1 x i64> %2, 1054 <vscale x 1 x i1> %3, 1055 i32 %4) 1056 1057 ret <vscale x 1 x i64> %a 1058} 1059 1060declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32( 1061 <vscale x 2 x i64>*, 1062 <vscale x 2 x i32>, 1063 <vscale x 2 x i64>, 1064 i32); 1065 1066define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind { 1067; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i32: 1068; CHECK: # %bb.0: # %entry 1069; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 1070; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10 1071; CHECK-NEXT: vmv2r.v v8, v10 1072; CHECK-NEXT: ret 1073entry: 1074 %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32( 1075 <vscale x 2 x i64> *%0, 1076 <vscale x 2 x i32> %1, 1077 <vscale x 2 x i64> %2, 1078 i32 %3) 1079 1080 ret <vscale x 2 x i64> %a 1081} 1082 1083declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32( 1084 <vscale x 2 x i64>*, 1085 <vscale x 2 x i32>, 1086 <vscale x 2 x i64>, 1087 <vscale x 2 x i1>, 1088 i32); 1089 1090define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 1091; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32: 1092; CHECK: # %bb.0: # %entry 1093; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 1094; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10, v0.t 1095; CHECK-NEXT: vmv2r.v v8, v10 1096; CHECK-NEXT: ret 1097entry: 1098 %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32( 1099 <vscale x 2 x i64> *%0, 1100 <vscale x 2 x i32> %1, 1101 <vscale x 2 x i64> %2, 1102 <vscale x 2 x i1> %3, 1103 i32 %4) 1104 1105 ret <vscale x 2 x i64> %a 1106} 1107 1108declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32( 1109 <vscale x 4 x i64>*, 1110 <vscale x 4 x i32>, 1111 <vscale x 4 x i64>, 1112 i32); 1113 1114define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind { 1115; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i32: 1116; CHECK: # %bb.0: # %entry 1117; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 1118; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12 1119; CHECK-NEXT: vmv4r.v v8, v12 1120; CHECK-NEXT: ret 1121entry: 1122 %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32( 1123 <vscale x 4 x i64> *%0, 1124 <vscale x 4 x i32> %1, 1125 
<vscale x 4 x i64> %2, 1126 i32 %3) 1127 1128 ret <vscale x 4 x i64> %a 1129} 1130 1131declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32( 1132 <vscale x 4 x i64>*, 1133 <vscale x 4 x i32>, 1134 <vscale x 4 x i64>, 1135 <vscale x 4 x i1>, 1136 i32); 1137 1138define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 1139; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32: 1140; CHECK: # %bb.0: # %entry 1141; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 1142; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12, v0.t 1143; CHECK-NEXT: vmv4r.v v8, v12 1144; CHECK-NEXT: ret 1145entry: 1146 %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32( 1147 <vscale x 4 x i64> *%0, 1148 <vscale x 4 x i32> %1, 1149 <vscale x 4 x i64> %2, 1150 <vscale x 4 x i1> %3, 1151 i32 %4) 1152 1153 ret <vscale x 4 x i64> %a 1154} 1155 1156declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32( 1157 <vscale x 8 x i64>*, 1158 <vscale x 8 x i32>, 1159 <vscale x 8 x i64>, 1160 i32); 1161 1162define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind { 1163; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i32: 1164; CHECK: # %bb.0: # %entry 1165; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 1166; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16 1167; CHECK-NEXT: vmv8r.v v8, v16 1168; CHECK-NEXT: ret 1169entry: 1170 %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32( 1171 <vscale x 8 x i64> *%0, 1172 <vscale x 8 x i32> %1, 1173 <vscale x 8 x i64> %2, 1174 i32 %3) 1175 1176 ret <vscale x 8 x i64> %a 1177} 1178 1179declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32( 1180 <vscale x 8 x i64>*, 1181 <vscale x 8 x i32>, 1182 <vscale x 8 x i64>, 1183 <vscale x 8 x i1>, 1184 i32); 1185 1186define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { 1187; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32: 1188; CHECK: # %bb.0: # %entry 1189; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 1190; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16, v0.t 1191; CHECK-NEXT: vmv8r.v v8, v16 1192; CHECK-NEXT: ret 1193entry: 1194 %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32( 1195 <vscale x 8 x i64> *%0, 1196 <vscale x 8 x i32> %1, 1197 <vscale x 8 x i64> %2, 1198 <vscale x 8 x i1> %3, 1199 i32 %4) 1200 1201 ret <vscale x 8 x i64> %a 1202} 1203 1204declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32( 1205 <vscale x 1 x float>*, 1206 <vscale x 1 x i32>, 1207 <vscale x 1 x float>, 1208 i32); 1209 1210define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i32(<vscale x 1 x float> *%0, <vscale x 1 x i32> %1, <vscale x 1 x float> %2, i32 %3) nounwind { 1211; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i32: 1212; CHECK: # %bb.0: # %entry 1213; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 1214; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9 1215; CHECK-NEXT: vmv1r.v v8, v9 1216; CHECK-NEXT: ret 1217entry: 1218 %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32( 1219 <vscale x 1 x float> *%0, 1220 <vscale x 1 x i32> %1, 1221 <vscale x 1 x float> %2, 1222 i32 %3) 1223 1224 ret <vscale x 1 x float> %a 1225} 1226 1227declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32( 
1228 <vscale x 1 x float>*, 1229 <vscale x 1 x i32>, 1230 <vscale x 1 x float>, 1231 <vscale x 1 x i1>, 1232 i32); 1233 1234define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32(<vscale x 1 x float> *%0, <vscale x 1 x i32> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 1235; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32: 1236; CHECK: # %bb.0: # %entry 1237; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 1238; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9, v0.t 1239; CHECK-NEXT: vmv1r.v v8, v9 1240; CHECK-NEXT: ret 1241entry: 1242 %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32( 1243 <vscale x 1 x float> *%0, 1244 <vscale x 1 x i32> %1, 1245 <vscale x 1 x float> %2, 1246 <vscale x 1 x i1> %3, 1247 i32 %4) 1248 1249 ret <vscale x 1 x float> %a 1250} 1251 1252declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32( 1253 <vscale x 2 x float>*, 1254 <vscale x 2 x i32>, 1255 <vscale x 2 x float>, 1256 i32); 1257 1258define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i32(<vscale x 2 x float> *%0, <vscale x 2 x i32> %1, <vscale x 2 x float> %2, i32 %3) nounwind { 1259; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i32: 1260; CHECK: # %bb.0: # %entry 1261; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 1262; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9 1263; CHECK-NEXT: vmv1r.v v8, v9 1264; CHECK-NEXT: ret 1265entry: 1266 %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32( 1267 <vscale x 2 x float> *%0, 1268 <vscale x 2 x i32> %1, 1269 <vscale x 2 x float> %2, 1270 i32 %3) 1271 1272 ret <vscale x 2 x float> %a 1273} 1274 1275declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32( 1276 <vscale x 2 x float>*, 1277 <vscale x 2 x i32>, 1278 <vscale x 2 x float>, 1279 <vscale x 2 x i1>, 1280 i32); 1281 1282define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32(<vscale x 2 x float> *%0, <vscale x 2 x i32> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 1283; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32: 1284; CHECK: # %bb.0: # %entry 1285; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 1286; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9, v0.t 1287; CHECK-NEXT: vmv1r.v v8, v9 1288; CHECK-NEXT: ret 1289entry: 1290 %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32( 1291 <vscale x 2 x float> *%0, 1292 <vscale x 2 x i32> %1, 1293 <vscale x 2 x float> %2, 1294 <vscale x 2 x i1> %3, 1295 i32 %4) 1296 1297 ret <vscale x 2 x float> %a 1298} 1299 1300declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32( 1301 <vscale x 4 x float>*, 1302 <vscale x 4 x i32>, 1303 <vscale x 4 x float>, 1304 i32); 1305 1306define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i32(<vscale x 4 x float> *%0, <vscale x 4 x i32> %1, <vscale x 4 x float> %2, i32 %3) nounwind { 1307; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i32: 1308; CHECK: # %bb.0: # %entry 1309; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 1310; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10 1311; CHECK-NEXT: vmv2r.v v8, v10 1312; CHECK-NEXT: ret 1313entry: 1314 %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32( 1315 <vscale x 4 x float> *%0, 1316 <vscale x 4 x i32> %1, 1317 <vscale x 4 x float> %2, 1318 i32 %3) 1319 1320 ret <vscale x 4 x float> %a 1321} 1322 1323declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32( 1324 <vscale x 4 x float>*, 1325 <vscale x 4 x i32>, 1326 <vscale x 4 x float>, 1327 <vscale x 4 x i1>, 1328 
i32); 1329 1330define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32(<vscale x 4 x float> *%0, <vscale x 4 x i32> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 1331; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32: 1332; CHECK: # %bb.0: # %entry 1333; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu 1334; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10, v0.t 1335; CHECK-NEXT: vmv2r.v v8, v10 1336; CHECK-NEXT: ret 1337entry: 1338 %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32( 1339 <vscale x 4 x float> *%0, 1340 <vscale x 4 x i32> %1, 1341 <vscale x 4 x float> %2, 1342 <vscale x 4 x i1> %3, 1343 i32 %4) 1344 1345 ret <vscale x 4 x float> %a 1346} 1347 1348declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32( 1349 <vscale x 8 x float>*, 1350 <vscale x 8 x i32>, 1351 <vscale x 8 x float>, 1352 i32); 1353 1354define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i32(<vscale x 8 x float> *%0, <vscale x 8 x i32> %1, <vscale x 8 x float> %2, i32 %3) nounwind { 1355; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i32: 1356; CHECK: # %bb.0: # %entry 1357; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 1358; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12 1359; CHECK-NEXT: vmv4r.v v8, v12 1360; CHECK-NEXT: ret 1361entry: 1362 %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32( 1363 <vscale x 8 x float> *%0, 1364 <vscale x 8 x i32> %1, 1365 <vscale x 8 x float> %2, 1366 i32 %3) 1367 1368 ret <vscale x 8 x float> %a 1369} 1370 1371declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32( 1372 <vscale x 8 x float>*, 1373 <vscale x 8 x i32>, 1374 <vscale x 8 x float>, 1375 <vscale x 8 x i1>, 1376 i32); 1377 1378define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32(<vscale x 8 x float> *%0, <vscale x 8 x i32> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { 1379; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32: 1380; CHECK: # %bb.0: # %entry 1381; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu 1382; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12, v0.t 1383; CHECK-NEXT: vmv4r.v v8, v12 1384; CHECK-NEXT: ret 1385entry: 1386 %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32( 1387 <vscale x 8 x float> *%0, 1388 <vscale x 8 x i32> %1, 1389 <vscale x 8 x float> %2, 1390 <vscale x 8 x i1> %3, 1391 i32 %4) 1392 1393 ret <vscale x 8 x float> %a 1394} 1395 1396declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32( 1397 <vscale x 16 x float>*, 1398 <vscale x 16 x i32>, 1399 <vscale x 16 x float>, 1400 i32); 1401 1402define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i32(<vscale x 16 x float> *%0, <vscale x 16 x i32> %1, <vscale x 16 x float> %2, i32 %3) nounwind { 1403; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i32: 1404; CHECK: # %bb.0: # %entry 1405; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu 1406; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16 1407; CHECK-NEXT: vmv8r.v v8, v16 1408; CHECK-NEXT: ret 1409entry: 1410 %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32( 1411 <vscale x 16 x float> *%0, 1412 <vscale x 16 x i32> %1, 1413 <vscale x 16 x float> %2, 1414 i32 %3) 1415 1416 ret <vscale x 16 x float> %a 1417} 1418 1419declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32( 1420 <vscale x 16 x float>*, 1421 <vscale x 16 x i32>, 1422 <vscale x 16 x float>, 1423 <vscale x 16 x i1>, 1424 i32); 1425 1426define <vscale x 16 x float> 
@intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32(<vscale x 16 x float> *%0, <vscale x 16 x i32> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { 1427; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32: 1428; CHECK: # %bb.0: # %entry 1429; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu 1430; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16, v0.t 1431; CHECK-NEXT: vmv8r.v v8, v16 1432; CHECK-NEXT: ret 1433entry: 1434 %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32( 1435 <vscale x 16 x float> *%0, 1436 <vscale x 16 x i32> %1, 1437 <vscale x 16 x float> %2, 1438 <vscale x 16 x i1> %3, 1439 i32 %4) 1440 1441 ret <vscale x 16 x float> %a 1442} 1443 1444declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32( 1445 <vscale x 1 x double>*, 1446 <vscale x 1 x i32>, 1447 <vscale x 1 x double>, 1448 i32); 1449 1450define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i32(<vscale x 1 x double> *%0, <vscale x 1 x i32> %1, <vscale x 1 x double> %2, i32 %3) nounwind { 1451; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i32: 1452; CHECK: # %bb.0: # %entry 1453; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 1454; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9 1455; CHECK-NEXT: vmv1r.v v8, v9 1456; CHECK-NEXT: ret 1457entry: 1458 %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32( 1459 <vscale x 1 x double> *%0, 1460 <vscale x 1 x i32> %1, 1461 <vscale x 1 x double> %2, 1462 i32 %3) 1463 1464 ret <vscale x 1 x double> %a 1465} 1466 1467declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32( 1468 <vscale x 1 x double>*, 1469 <vscale x 1 x i32>, 1470 <vscale x 1 x double>, 1471 <vscale x 1 x i1>, 1472 i32); 1473 1474define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32(<vscale x 1 x double> *%0, <vscale x 1 x i32> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 1475; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32: 1476; CHECK: # %bb.0: # %entry 1477; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu 1478; CHECK-NEXT: vamoswapei32.v v9, (a0), v8, v9, v0.t 1479; CHECK-NEXT: vmv1r.v v8, v9 1480; CHECK-NEXT: ret 1481entry: 1482 %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32( 1483 <vscale x 1 x double> *%0, 1484 <vscale x 1 x i32> %1, 1485 <vscale x 1 x double> %2, 1486 <vscale x 1 x i1> %3, 1487 i32 %4) 1488 1489 ret <vscale x 1 x double> %a 1490} 1491 1492declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32( 1493 <vscale x 2 x double>*, 1494 <vscale x 2 x i32>, 1495 <vscale x 2 x double>, 1496 i32); 1497 1498define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i32(<vscale x 2 x double> *%0, <vscale x 2 x i32> %1, <vscale x 2 x double> %2, i32 %3) nounwind { 1499; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i32: 1500; CHECK: # %bb.0: # %entry 1501; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 1502; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10 1503; CHECK-NEXT: vmv2r.v v8, v10 1504; CHECK-NEXT: ret 1505entry: 1506 %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32( 1507 <vscale x 2 x double> *%0, 1508 <vscale x 2 x i32> %1, 1509 <vscale x 2 x double> %2, 1510 i32 %3) 1511 1512 ret <vscale x 2 x double> %a 1513} 1514 1515declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32( 1516 <vscale x 2 x double>*, 1517 <vscale x 2 x i32>, 1518 <vscale x 2 x double>, 1519 <vscale x 2 x i1>, 1520 i32); 1521 1522define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32(<vscale x 2 x 
double> *%0, <vscale x 2 x i32> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 1523; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32: 1524; CHECK: # %bb.0: # %entry 1525; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu 1526; CHECK-NEXT: vamoswapei32.v v10, (a0), v8, v10, v0.t 1527; CHECK-NEXT: vmv2r.v v8, v10 1528; CHECK-NEXT: ret 1529entry: 1530 %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32( 1531 <vscale x 2 x double> *%0, 1532 <vscale x 2 x i32> %1, 1533 <vscale x 2 x double> %2, 1534 <vscale x 2 x i1> %3, 1535 i32 %4) 1536 1537 ret <vscale x 2 x double> %a 1538} 1539 1540declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32( 1541 <vscale x 4 x double>*, 1542 <vscale x 4 x i32>, 1543 <vscale x 4 x double>, 1544 i32); 1545 1546define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i32(<vscale x 4 x double> *%0, <vscale x 4 x i32> %1, <vscale x 4 x double> %2, i32 %3) nounwind { 1547; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i32: 1548; CHECK: # %bb.0: # %entry 1549; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 1550; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12 1551; CHECK-NEXT: vmv4r.v v8, v12 1552; CHECK-NEXT: ret 1553entry: 1554 %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32( 1555 <vscale x 4 x double> *%0, 1556 <vscale x 4 x i32> %1, 1557 <vscale x 4 x double> %2, 1558 i32 %3) 1559 1560 ret <vscale x 4 x double> %a 1561} 1562 1563declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32( 1564 <vscale x 4 x double>*, 1565 <vscale x 4 x i32>, 1566 <vscale x 4 x double>, 1567 <vscale x 4 x i1>, 1568 i32); 1569 1570define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32(<vscale x 4 x double> *%0, <vscale x 4 x i32> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { 1571; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32: 1572; CHECK: # %bb.0: # %entry 1573; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 1574; CHECK-NEXT: vamoswapei32.v v12, (a0), v8, v12, v0.t 1575; CHECK-NEXT: vmv4r.v v8, v12 1576; CHECK-NEXT: ret 1577entry: 1578 %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32( 1579 <vscale x 4 x double> *%0, 1580 <vscale x 4 x i32> %1, 1581 <vscale x 4 x double> %2, 1582 <vscale x 4 x i1> %3, 1583 i32 %4) 1584 1585 ret <vscale x 4 x double> %a 1586} 1587 1588declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32( 1589 <vscale x 8 x double>*, 1590 <vscale x 8 x i32>, 1591 <vscale x 8 x double>, 1592 i32); 1593 1594define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i32(<vscale x 8 x double> *%0, <vscale x 8 x i32> %1, <vscale x 8 x double> %2, i32 %3) nounwind { 1595; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i32: 1596; CHECK: # %bb.0: # %entry 1597; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 1598; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16 1599; CHECK-NEXT: vmv8r.v v8, v16 1600; CHECK-NEXT: ret 1601entry: 1602 %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32( 1603 <vscale x 8 x double> *%0, 1604 <vscale x 8 x i32> %1, 1605 <vscale x 8 x double> %2, 1606 i32 %3) 1607 1608 ret <vscale x 8 x double> %a 1609} 1610 1611declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32( 1612 <vscale x 8 x double>*, 1613 <vscale x 8 x i32>, 1614 <vscale x 8 x double>, 1615 <vscale x 8 x i1>, 1616 i32); 1617 1618define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32(<vscale x 8 x double> *%0, <vscale x 8 x i32> %1, <vscale x 8 x double> 
%2, <vscale x 8 x i1> %3, i32 %4) nounwind { 1619; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32: 1620; CHECK: # %bb.0: # %entry 1621; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 1622; CHECK-NEXT: vamoswapei32.v v16, (a0), v8, v16, v0.t 1623; CHECK-NEXT: vmv8r.v v8, v16 1624; CHECK-NEXT: ret 1625entry: 1626 %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32( 1627 <vscale x 8 x double> *%0, 1628 <vscale x 8 x i32> %1, 1629 <vscale x 8 x double> %2, 1630 <vscale x 8 x i1> %3, 1631 i32 %4) 1632 1633 ret <vscale x 8 x double> %a 1634} 1635 1636declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16( 1637 <vscale x 1 x i32>*, 1638 <vscale x 1 x i16>, 1639 <vscale x 1 x i32>, 1640 i32); 1641 1642define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind { 1643; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i16: 1644; CHECK: # %bb.0: # %entry 1645; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 1646; CHECK-NEXT: vamoswapei16.v v9, (a0), v8, v9 1647; CHECK-NEXT: vmv1r.v v8, v9 1648; CHECK-NEXT: ret 1649entry: 1650 %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16( 1651 <vscale x 1 x i32> *%0, 1652 <vscale x 1 x i16> %1, 1653 <vscale x 1 x i32> %2, 1654 i32 %3) 1655 1656 ret <vscale x 1 x i32> %a 1657} 1658 1659declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16( 1660 <vscale x 1 x i32>*, 1661 <vscale x 1 x i16>, 1662 <vscale x 1 x i32>, 1663 <vscale x 1 x i1>, 1664 i32); 1665 1666define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { 1667; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16: 1668; CHECK: # %bb.0: # %entry 1669; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 1670; CHECK-NEXT: vamoswapei16.v v9, (a0), v8, v9, v0.t 1671; CHECK-NEXT: vmv1r.v v8, v9 1672; CHECK-NEXT: ret 1673entry: 1674 %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16( 1675 <vscale x 1 x i32> *%0, 1676 <vscale x 1 x i16> %1, 1677 <vscale x 1 x i32> %2, 1678 <vscale x 1 x i1> %3, 1679 i32 %4) 1680 1681 ret <vscale x 1 x i32> %a 1682} 1683 1684declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16( 1685 <vscale x 2 x i32>*, 1686 <vscale x 2 x i16>, 1687 <vscale x 2 x i32>, 1688 i32); 1689 1690define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { 1691; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i16: 1692; CHECK: # %bb.0: # %entry 1693; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu 1694; CHECK-NEXT: vamoswapei16.v v9, (a0), v8, v9 1695; CHECK-NEXT: vmv1r.v v8, v9 1696; CHECK-NEXT: ret 1697entry: 1698 %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16( 1699 <vscale x 2 x i32> *%0, 1700 <vscale x 2 x i16> %1, 1701 <vscale x 2 x i32> %2, 1702 i32 %3) 1703 1704 ret <vscale x 2 x i32> %a 1705} 1706 1707declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16( 1708 <vscale x 2 x i32>*, 1709 <vscale x 2 x i16>, 1710 <vscale x 2 x i32>, 1711 <vscale x 2 x i1>, 1712 i32); 1713 1714define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { 1715; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16: 1716; CHECK: # %bb.0: # %entry 1717; CHECK-NEXT: vsetvli zero, a1, 
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
  <vscale x 4 x i32>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i32> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
  <vscale x 8 x i32>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i32> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
  <vscale x 16 x i32>*,
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  i32);

define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i32> %2,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>*,
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
  <vscale x 1 x i64>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
  <vscale x 2 x i64>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i64> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
  <vscale x 4 x i64>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i64> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
  <vscale x 8 x i64>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i64>,
  i32);

define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i64> %2,
    i32 %3)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
  <vscale x 1 x float>*,
  <vscale x 1 x i16>,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i16(<vscale x 1 x float> *%0, <vscale x 1 x i16> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
    <vscale x 1 x float> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x float> %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
  <vscale x 1 x float>*,
  <vscale x 1 x i16>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16(<vscale x 1 x float> *%0, <vscale x 1 x i16> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
    <vscale x 1 x float> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
  <vscale x 2 x float>*,
  <vscale x 2 x i16>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i16(<vscale x 2 x float> *%0, <vscale x 2 x i16> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
    <vscale x 2 x float> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
  <vscale x 2 x float>*,
  <vscale x 2 x i16>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16(<vscale x 2 x float> *%0, <vscale x 2 x i16> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
    <vscale x 2 x float> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
  <vscale x 4 x float>*,
  <vscale x 4 x i16>,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i16(<vscale x 4 x float> *%0, <vscale x 4 x i16> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
    <vscale x 4 x float> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x float> %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
  <vscale x 4 x float>*,
  <vscale x 4 x i16>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16(<vscale x 4 x float> *%0, <vscale x 4 x i16> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
    <vscale x 4 x float> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
  <vscale x 8 x float>*,
  <vscale x 8 x i16>,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i16(<vscale x 8 x float> *%0, <vscale x 8 x i16> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
    <vscale x 8 x float> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x float> %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
  <vscale x 8 x float>*,
  <vscale x 8 x i16>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16(<vscale x 8 x float> *%0, <vscale x 8 x i16> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
    <vscale x 8 x float> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
  <vscale x 16 x float>*,
  <vscale x 16 x i16>,
  <vscale x 16 x float>,
  i32);

define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i16(<vscale x 16 x float> *%0, <vscale x 16 x i16> %1, <vscale x 16 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
    <vscale x 16 x float> *%0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x float> %2,
    i32 %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
  <vscale x 16 x float>*,
  <vscale x 16 x i16>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16(<vscale x 16 x float> *%0, <vscale x 16 x i16> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
    <vscale x 16 x float> *%0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
  <vscale x 1 x double>*,
  <vscale x 1 x i16>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i16(<vscale x 1 x double> *%0, <vscale x 1 x i16> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
    <vscale x 1 x double> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
  <vscale x 1 x double>*,
  <vscale x 1 x i16>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16(<vscale x 1 x double> *%0, <vscale x 1 x i16> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
    <vscale x 1 x double> *%0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
  <vscale x 2 x double>*,
  <vscale x 2 x i16>,
  <vscale x 2 x double>,
  i32);

define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i16(<vscale x 2 x double> *%0, <vscale x 2 x i16> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
    <vscale x 2 x double> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x double> %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
  <vscale x 2 x double>*,
  <vscale x 2 x i16>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16(<vscale x 2 x double> *%0, <vscale x 2 x i16> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double> *%0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
  <vscale x 4 x double>*,
  <vscale x 4 x i16>,
  <vscale x 4 x double>,
  i32);

define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i16(<vscale x 4 x double> *%0, <vscale x 4 x i16> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
    <vscale x 4 x double> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x double> %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
  <vscale x 4 x double>*,
  <vscale x 4 x i16>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16(<vscale x 4 x double> *%0, <vscale x 4 x i16> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
    <vscale x 4 x double> *%0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
  <vscale x 8 x double>*,
  <vscale x 8 x i16>,
  <vscale x 8 x double>,
  i32);

define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i16(<vscale x 8 x double> *%0, <vscale x 8 x i16> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
    <vscale x 8 x double> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x double> %2,
    i32 %3)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
  <vscale x 8 x double>*,
  <vscale x 8 x i16>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16(<vscale x 8 x double> *%0, <vscale x 8 x i16> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
    <vscale x 8 x double> *%0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
  <vscale x 1 x i32>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i32> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
  <vscale x 2 x i32>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
  <vscale x 4 x i32>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i32> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
  <vscale x 8 x i32>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i32> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
  <vscale x 16 x i32>*,
  <vscale x 16 x i8>,
  <vscale x 16 x i32>,
  i32);

define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i32> %2,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>*,
  <vscale x 16 x i8>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> *%0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
  <vscale x 1 x i64>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
  <vscale x 2 x i64>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i64> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
  <vscale x 2 x i64>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
  <vscale x 4 x i64>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i64> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
  <vscale x 4 x i64>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
  <vscale x 8 x i64>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i64>,
  i32);

define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i64> %2,
    i32 %3)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
  <vscale x 8 x i64>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
  <vscale x 1 x float>*,
  <vscale x 1 x i8>,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i8(<vscale x 1 x float> *%0, <vscale x 1 x i8> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
    <vscale x 1 x float> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x float> %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
  <vscale x 1 x float>*,
  <vscale x 1 x i8>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8(<vscale x 1 x float> *%0, <vscale x 1 x i8> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
    <vscale x 1 x float> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
  <vscale x 2 x float>*,
  <vscale x 2 x i8>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i8(<vscale x 2 x float> *%0, <vscale x 2 x i8> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
    <vscale x 2 x float> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
  <vscale x 2 x float>*,
  <vscale x 2 x i8>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8(<vscale x 2 x float> *%0, <vscale x 2 x i8> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
    <vscale x 2 x float> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
  <vscale x 4 x float>*,
  <vscale x 4 x i8>,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i8(<vscale x 4 x float> *%0, <vscale x 4 x i8> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
    <vscale x 4 x float> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x float> %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
  <vscale x 4 x float>*,
  <vscale x 4 x i8>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8(<vscale x 4 x float> *%0, <vscale x 4 x i8> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
    <vscale x 4 x float> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
  <vscale x 8 x float>*,
  <vscale x 8 x i8>,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i8(<vscale x 8 x float> *%0, <vscale x 8 x i8> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
    <vscale x 8 x float> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x float> %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
  <vscale x 8 x float>*,
  <vscale x 8 x i8>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8(<vscale x 8 x float> *%0, <vscale x 8 x i8> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
    <vscale x 8 x float> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
  <vscale x 16 x float>*,
  <vscale x 16 x i8>,
  <vscale x 16 x float>,
  i32);

define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i8(<vscale x 16 x float> *%0, <vscale x 16 x i8> %1, <vscale x 16 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
    <vscale x 16 x float> *%0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x float> %2,
    i32 %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
  <vscale x 16 x float>*,
  <vscale x 16 x i8>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8(<vscale x 16 x float> *%0, <vscale x 16 x i8> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
    <vscale x 16 x float> *%0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
  <vscale x 1 x double>*,
  <vscale x 1 x i8>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i8(<vscale x 1 x double> *%0, <vscale x 1 x i8> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
    <vscale x 1 x double> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
  <vscale x 1 x double>*,
  <vscale x 1 x i8>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8(<vscale x 1 x double> *%0, <vscale x 1 x i8> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
    <vscale x 1 x double> *%0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
  <vscale x 2 x double>*,
  <vscale x 2 x i8>,
  <vscale x 2 x double>,
  i32);

define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i8(<vscale x 2 x double> *%0, <vscale x 2 x i8> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
    <vscale x 2 x double> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x double> %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
  <vscale x 2 x double>*,
  <vscale x 2 x i8>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8(<vscale x 2 x double> *%0, <vscale x 2 x i8> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
    <vscale x 2 x double> *%0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
  <vscale x 4 x double>*,
  <vscale x 4 x i8>,
  <vscale x 4 x double>,
  i32);

define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i8(<vscale x 4 x double> *%0, <vscale x 4 x i8> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
    <vscale x 4 x double> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x double> %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
  <vscale x 4 x double>*,
  <vscale x 4 x i8>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8(<vscale x 4 x double> *%0, <vscale x 4 x i8> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
    <vscale x 4 x double> *%0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
  <vscale x 8 x double>*,
  <vscale x 8 x i8>,
  <vscale x 8 x double>,
  i32);

define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i8(<vscale x 8 x double> *%0, <vscale x 8 x i8> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
    <vscale x 8 x double> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x double> %2,
    i32 %3)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
  <vscale x 8 x double>*,
  <vscale x 8 x i8>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8(<vscale x 8 x double> *%0, <vscale x 8 x i8> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
    <vscale x 8 x double> *%0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x double> %a
}