; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i64);

define <vscale x 1 x half> @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    i64 %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i64);

define <vscale x 2 x half> @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    i64 %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i64);

define <vscale x 8 x half> @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    i64 %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i64);

define <vscale x 16 x half> @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    i64 %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i64);

define <vscale x 1 x float> @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    i64 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i64);

define <vscale x 4 x float> @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    i64 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i64);

define <vscale x 8 x float> @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    i64 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    i64 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i64);

define <vscale x 2 x double> @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    i64 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i64);

define <vscale x 4 x double> @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    i64 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  i64);

define <vscale x 1 x half> @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    i64 %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  i64);

define <vscale x 2 x half> @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    i64 %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  i64);

define <vscale x 8 x half> @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    i64 %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  i64);

define <vscale x 16 x half> @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    i64 %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  i64);

define <vscale x 1 x float> @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    i64 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  i64);

define <vscale x 4 x float> @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    i64 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  i64);

define <vscale x 8 x float> @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    i64 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    i64 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  i64);

define <vscale x 2 x double> @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    i64 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  i64);

define <vscale x 4 x double> @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    i64 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}