; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | FileCheck %s \
; RUN:   --check-prefix=CHECK-BE

; This test checks that LSR properly recognizes lxvp/stxvp as load/store
; intrinsics to avoid generating x-form instructions instead of d-forms.

declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*)
declare void @llvm.ppc.vsx.stxvp(<256 x i1>, i8*)

; Copies %n blocks of 128 bytes (4 x 32-byte vector pairs) from %ptr to %ptr2,
; advancing the byte index by 1 each iteration. The expected asm uses d-form
; lxvp/stxvp with immediate offsets (-64/-32/0/32) off a single pre-biased
; base register per pointer, rather than x-form indexed accesses.
define void @foo(i32 zeroext %n, <256 x i1>* %ptr, <256 x i1>* %ptr2) {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    cmplwi r3, 0
; CHECK-NEXT:    beqlr cr0
; CHECK-NEXT:  # %bb.1: # %for.body.lr.ph
; CHECK-NEXT:    clrldi r6, r3, 32
; CHECK-NEXT:    addi r3, r4, 64
; CHECK-NEXT:    addi r4, r5, 64
; CHECK-NEXT:    mtctr r6
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB0_2: # %for.body
; CHECK-NEXT:    #
; CHECK-NEXT:    lxvp vsp0, -64(r3)
; CHECK-NEXT:    lxvp vsp2, -32(r3)
; CHECK-NEXT:    lxvp vsp4, 0(r3)
; CHECK-NEXT:    lxvp vsp6, 32(r3)
; CHECK-NEXT:    addi r3, r3, 1
; CHECK-NEXT:    stxvp vsp0, -64(r4)
; CHECK-NEXT:    stxvp vsp2, -32(r4)
; CHECK-NEXT:    stxvp vsp4, 0(r4)
; CHECK-NEXT:    stxvp vsp6, 32(r4)
; CHECK-NEXT:    addi r4, r4, 1
; CHECK-NEXT:    bdnz .LBB0_2
; CHECK-NEXT:  # %bb.3: # %for.cond.cleanup
; CHECK-NEXT:    blr
;
; CHECK-BE-LABEL: foo:
; CHECK-BE:       # %bb.0: # %entry
; CHECK-BE-NEXT:    cmplwi r3, 0
; CHECK-BE-NEXT:    beqlr cr0
; CHECK-BE-NEXT:  # %bb.1: # %for.body.lr.ph
; CHECK-BE-NEXT:    clrldi r6, r3, 32
; CHECK-BE-NEXT:    addi r3, r4, 64
; CHECK-BE-NEXT:    addi r4, r5, 64
; CHECK-BE-NEXT:    mtctr r6
; CHECK-BE-NEXT:    .p2align 4
; CHECK-BE-NEXT:  .LBB0_2: # %for.body
; CHECK-BE-NEXT:    #
; CHECK-BE-NEXT:    lxvp vsp0, -64(r3)
; CHECK-BE-NEXT:    lxvp vsp2, -32(r3)
; CHECK-BE-NEXT:    lxvp vsp4, 0(r3)
; CHECK-BE-NEXT:    lxvp vsp6, 32(r3)
; CHECK-BE-NEXT:    addi r3, r3, 1
; CHECK-BE-NEXT:    stxvp vsp0, -64(r4)
; CHECK-BE-NEXT:    stxvp vsp2, -32(r4)
; CHECK-BE-NEXT:    stxvp vsp4, 0(r4)
; CHECK-BE-NEXT:    stxvp vsp6, 32(r4)
; CHECK-BE-NEXT:    addi r4, r4, 1
; CHECK-BE-NEXT:    bdnz .LBB0_2
; CHECK-BE-NEXT:  # %bb.3: # %for.cond.cleanup
; CHECK-BE-NEXT:    blr
entry:
  %cmp35.not = icmp eq i32 %n, 0
  br i1 %cmp35.not, label %for.cond.cleanup, label %for.body.lr.ph

for.body.lr.ph:
  %0 = bitcast <256 x i1>* %ptr to i8*
  %1 = bitcast <256 x i1>* %ptr2 to i8*
  %wide.trip.count = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
  ; Four loads at offsets +0/+32/+64/+96 from the current byte index; LSR
  ; should fold these into d-form lxvp off one incremented base register.
  %2 = getelementptr i8, i8* %0, i64 %indvars.iv
  %3 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %2)
  %add2 = add nuw nsw i64 %indvars.iv, 32
  %4 = getelementptr i8, i8* %0, i64 %add2
  %5 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %4)
  %add4 = add nuw nsw i64 %indvars.iv, 64
  %6 = getelementptr i8, i8* %0, i64 %add4
  %7 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %6)
  %add6 = add nuw nsw i64 %indvars.iv, 96
  %8 = getelementptr i8, i8* %0, i64 %add6
  %9 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %8)
  ; Matching four stores at the same offsets into the destination buffer.
  %10 = getelementptr i8, i8* %1, i64 %indvars.iv
  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %3, i8* %10)
  %11 = getelementptr i8, i8* %1, i64 %add2
  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %5, i8* %11)
  %12 = getelementptr i8, i8* %1, i64 %add4
  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %7, i8* %12)
  %13 = getelementptr i8, i8* %1, i64 %add6
  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %9, i8* %13)
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}