; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
  <vscale x 1 x half>,
  i32);

define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
    <vscale x 1 x half> %0,
    i32 %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
  <vscale x 2 x half>,
  i32);

define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
    <vscale x 2 x half> %0,
    i32 %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
  <vscale x 2 x i16>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2 x i16> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
    <vscale x 4 x half> %0,
    i32 %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
  <vscale x 4 x i16>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4 x i16> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
  <vscale x 8 x half>,
  i32);

define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
    <vscale x 8 x half> %0,
    i32 %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
  <vscale x 8 x i16>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8 x i16> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
  <vscale x 16 x half>,
  i32);

define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
    <vscale x 16 x half> %0,
    i32 %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
  <vscale x 16 x i16>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16(<vscale x 16 x i16> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
  <vscale x 32 x half>,
  i32);

define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
    <vscale x 32 x half> %0,
    i32 %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
  <vscale x 32 x i16>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16(<vscale x 32 x i16> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
    <vscale x 1 x float> %0,
    i32 %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
  <vscale x 1 x i32>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1 x i32> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
    <vscale x 2 x float> %0,
    i32 %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
  <vscale x 2 x i32>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2 x i32> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
    <vscale x 4 x float> %0,
    i32 %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
  <vscale x 4 x i32>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4 x i32> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
    <vscale x 8 x float> %0,
    i32 %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
  <vscale x 8 x i32>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8 x i32> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
  <vscale x 16 x float>,
  i32);

define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
    <vscale x 16 x float> %0,
    i32 %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
  <vscale x 16 x i32>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32(<vscale x 16 x i32> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
    <vscale x 1 x double> %0,
    i32 %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
  <vscale x 1 x i64>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
  <vscale x 2 x double>,
  i32);

define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
    <vscale x 2 x double> %0,
    i32 %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
  <vscale x 2 x i64>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
  <vscale x 4 x double>,
  i32);

define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
    <vscale x 4 x double> %0,
    i32 %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
  <vscale x 4 x i64>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
  <vscale x 8 x double>,
  i32);

define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
    <vscale x 8 x double> %0,
    i32 %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
  <vscale x 8 x i64>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i64> %a
}