; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

; Tests lowering of the llvm.riscv.vmclr mask-clear intrinsic for every
; scalable mask type from nxv1i1 through nxv64i1. Each call takes the VL as
; an i64 operand and should lower to a single `vmclr.m v0`, preceded by a
; `vsetvli` selecting SEW=8 with the LMUL that matches the mask's element
; count (mf8 for nxv1i1 scaling up to m8 for nxv64i1).

declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
  i64);

; nxv1i1: expects vsetvli e8, mf8 (LMUL = 1/8).
define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
    i64 %0)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
  i64);

; nxv2i1: expects vsetvli e8, mf4 (LMUL = 1/4).
define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
    i64 %0)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
  i64);

; nxv4i1: expects vsetvli e8, mf2 (LMUL = 1/2).
define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
    i64 %0)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
  i64);

; nxv8i1: expects vsetvli e8, m1 (LMUL = 1).
define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
    i64 %0)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
  i64);

; nxv16i1: expects vsetvli e8, m2 (LMUL = 2).
define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
    i64 %0)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
  i64);

; nxv32i1: expects vsetvli e8, m4 (LMUL = 4).
define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
    i64 %0)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
  i64);

; nxv64i1: expects vsetvli e8, m8 (LMUL = 8).
define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
    i64 %0)

  ret <vscale x 64 x i1> %a
}