; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    i64 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    i64 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    i64 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    i64 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    i64 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}