; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i64);

define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i64);

define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i64);

define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i64);

define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  i64);

define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i64);

define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i64);

define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i64);

define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  i64);

define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i64);

define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i64);

define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  i64);

define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i64);

define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i64);

define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i64);

define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i64);

define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i64);

define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i64);

define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i64);

define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i64);

define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i64);

define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i64);

define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i64);

define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  i64);

define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  i64);

define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  i64);

define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
  <vscale x 8 x double>,
  double,
  i64);

define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfmin.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
    <vscale x 8 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}