; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
; RUN:   -verify-machineinstrs | FileCheck %s

; Check that loads and stores of scalable-vector mask types (<vscale x N x i1>)
; select the mask load/store instructions (vle1.v / vse1.v) with the LMUL
; implied by the element count: nxv64i1 -> m8 down to nxv1i1 -> mf8.
; Checked on both riscv32 and riscv64 with the same CHECK prefix, since the
; expected codegen is identical for both triples.

define void @test_load_mask_64(<vscale x 64 x i1>* %pa, <vscale x 64 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m8, ta, mu
; CHECK-NEXT:    vle1.v v25, (a0)
; CHECK-NEXT:    vse1.v v25, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 64 x i1>, <vscale x 64 x i1>* %pa
  store <vscale x 64 x i1> %a, <vscale x 64 x i1>* %pb
  ret void
}

define void @test_load_mask_32(<vscale x 32 x i1>* %pa, <vscale x 32 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m4, ta, mu
; CHECK-NEXT:    vle1.v v25, (a0)
; CHECK-NEXT:    vse1.v v25, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 32 x i1>, <vscale x 32 x i1>* %pa
  store <vscale x 32 x i1> %a, <vscale x 32 x i1>* %pb
  ret void
}

define void @test_load_mask_16(<vscale x 16 x i1>* %pa, <vscale x 16 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
; CHECK-NEXT:    vle1.v v25, (a0)
; CHECK-NEXT:    vse1.v v25, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 16 x i1>, <vscale x 16 x i1>* %pa
  store <vscale x 16 x i1> %a, <vscale x 16 x i1>* %pb
  ret void
}

define void @test_load_mask_8(<vscale x 8 x i1>* %pa, <vscale x 8 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
; CHECK-NEXT:    vle1.v v25, (a0)
; CHECK-NEXT:    vse1.v v25, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 8 x i1>, <vscale x 8 x i1>* %pa
  store <vscale x 8 x i1> %a, <vscale x 8 x i1>* %pb
  ret void
}

define void @test_load_mask_4(<vscale x 4 x i1>* %pa, <vscale x 4 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vle1.v v25, (a0)
; CHECK-NEXT:    vse1.v v25, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 4 x i1>, <vscale x 4 x i1>* %pa
  store <vscale x 4 x i1> %a, <vscale x 4 x i1>* %pb
  ret void
}

define void @test_load_mask_2(<vscale x 2 x i1>* %pa, <vscale x 2 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vle1.v v25, (a0)
; CHECK-NEXT:    vse1.v v25, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 2 x i1>, <vscale x 2 x i1>* %pa
  store <vscale x 2 x i1> %a, <vscale x 2 x i1>* %pb
  ret void
}

define void @test_load_mask_1(<vscale x 1 x i1>* %pa, <vscale x 1 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vle1.v v25, (a0)
; CHECK-NEXT:    vse1.v v25, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 1 x i1>, <vscale x 1 x i1>* %pa
  store <vscale x 1 x i1> %a, <vscale x 1 x i1>* %pb
  ret void
}