; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare i64 @llvm.riscv.vfirst.i64.nxv1i1(
  <vscale x 1 x i1>,
  i64);

define i64 @intrinsic_vfirst_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vfirst.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.i64.nxv1i1(
    <vscale x 1 x i1> %0,
    i64 %1)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  i64);

define i64 @intrinsic_vfirst_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vfirst.m a0, v25, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    i64 %2)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.i64.nxv2i1(
  <vscale x 2 x i1>,
  i64);

define i64 @intrinsic_vfirst_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vfirst.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.i64.nxv2i1(
    <vscale x 2 x i1> %0,
    i64 %1)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  i64);

define i64 @intrinsic_vfirst_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vfirst.m a0, v25, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    i64 %2)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.i64.nxv4i1(
  <vscale x 4 x i1>,
  i64);

define i64 @intrinsic_vfirst_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vfirst.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.i64.nxv4i1(
    <vscale x 4 x i1> %0,
    i64 %1)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  i64);

define i64 @intrinsic_vfirst_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vfirst.m a0, v25, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    i64 %2)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.i64.nxv8i1(
  <vscale x 8 x i1>,
  i64);

define i64 @intrinsic_vfirst_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vfirst.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.i64.nxv8i1(
    <vscale x 8 x i1> %0,
    i64 %1)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  i64);

define i64 @intrinsic_vfirst_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vfirst.m a0, v25, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    i64 %2)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.i64.nxv16i1(
  <vscale x 16 x i1>,
  i64);

define i64 @intrinsic_vfirst_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vfirst.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.i64.nxv16i1(
    <vscale x 16 x i1> %0,
    i64 %1)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  i64);

define i64 @intrinsic_vfirst_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vfirst.m a0, v25, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    i64 %2)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.i64.nxv32i1(
  <vscale x 32 x i1>,
  i64);

define i64 @intrinsic_vfirst_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vfirst.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.i64.nxv32i1(
    <vscale x 32 x i1> %0,
    i64 %1)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  i64);

define i64 @intrinsic_vfirst_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vfirst.m a0, v25, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    i64 %2)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.i64.nxv64i1(
  <vscale x 64 x i1>,
  i64);

define i64 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vfirst.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.i64.nxv64i1(
    <vscale x 64 x i1> %0,
    i64 %1)

  ret i64 %a
}

declare i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  i64);

define i64 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vfirst.m a0, v25, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    i64 %2)

  ret i64 %a
}