; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32:
; CHECK-NEXT: prfb pldl1strm, p0, [z0.s, #7]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
  ret void
}

; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [z0.d, #7]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
  ret void
}

; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32:
; CHECK-NEXT: prfh pldl1strm, p0, [z0.s, #6]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
  ret void
}

; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [z0.d, #6]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32:
; CHECK-NEXT: prfw pldl1strm, p0, [z0.s, #12]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [z0.d, #12]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32:
; CHECK-NEXT: prfd pldl1strm, p0, [z0.s, #16]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [z0.d, #16]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
  ret void
}

declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)