# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -mtriple=riscv64 -mattr=experimental-v -riscv-v-vector-bits-min=128 -run-pass=finalize-isel -o - | FileCheck %s

# This test makes sure we peek through the COPY instruction between the
# IMPLICIT_DEF and PseudoVLE64_V_M8_MASK in order to select the tail agnostic
# policy. The test is working if the second argument to PseudoVSETVLI has bit 6
# set.

--- |
  ; ModuleID = 'test.ll'
  source_filename = "test.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  ; Function Attrs: nounwind
  define <vscale x 8 x i64> @masked_load_nxv8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) #0 {
    %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
    ret <vscale x 8 x i64> %load
  }

  ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
  declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i64>) #1

  attributes #0 = { nounwind "target-features"="+experimental-v" }
  attributes #1 = { argmemonly nofree nosync nounwind readonly willreturn "target-features"="+experimental-v" }

...
---
# Machine function under test: the COPY of an IMPLICIT_DEF feeding the masked
# load's merge operand must still be recognized as undef so finalize-isel picks
# the tail-agnostic vsetvli policy (vtype immediate 6 below).
name: masked_load_nxv8i64
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: vrm8nov0 }
  - { id: 3, class: vrm8 }
  - { id: 4, class: vrm8nov0 }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v0', virtual-reg: '%1' }
frameInfo:
  maxAlignment: 1
machineFunctionInfo: {}
body: |
  bb.0 (%ir-block.0):
    liveins: $x10, $v0

    ; CHECK-LABEL: name: masked_load_nxv8i64
    ; CHECK: liveins: $x10, $v0
    ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK: $v0 = COPY [[COPY]]
    ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
    ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
    ; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
    ; CHECK: PseudoRET implicit $v8m8
    %1:vr = COPY $v0
    %0:gpr = COPY $x10
    $v0 = COPY %1
    %3:vrm8 = IMPLICIT_DEF
    %4:vrm8nov0 = COPY %3
    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
    $v8m8 = COPY %2
    PseudoRET implicit $v8m8

...