; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s

; This test case aims to test the vector divide instructions on Power10.
; This includes the low order and extended versions of vector divide,
; that operate on signed and unsigned words and doublewords.
; This also includes 128 bit vector divide instructions.

define <2 x i64> @test_vdivud(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivud:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivud v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = udiv <2 x i64> %a, %b
  ret <2 x i64> %div
}

define <2 x i64> @test_vdivsd(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivsd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivsd v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = sdiv <2 x i64> %a, %b
  ret <2 x i64> %div
}

define <4 x i32> @test_vdivuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivuw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivuw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = udiv <4 x i32> %a, %b
  ret <4 x i32> %div
}

define <4 x i32> @test_vdivsw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivsw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivsw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = sdiv <4 x i32> %a, %b
  ret <4 x i32> %div
}

; Test the vector divide extended intrinsics.
declare <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64>, <2 x i64>)

define <4 x i32> @test_vdivesw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivesw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivesw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %div
}

define <4 x i32> @test_vdiveuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdiveuw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdiveuw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %div
}

define <1 x i128> @test_vdivsq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivsq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivsq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = sdiv <1 x i128> %x, %y
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vdivuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivuq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivuq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = udiv <1 x i128> %x, %y
  ret <1 x i128> %tmp
}

define <2 x i64> @test_vdivesd(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivesd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivesd v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %div
}

define <2 x i64> @test_vdiveud(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdiveud:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdiveud v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %div
}

declare <1 x i128> @llvm.ppc.altivec.vdivesq(<1 x i128>, <1 x i128>) nounwind readnone
declare <1 x i128> @llvm.ppc.altivec.vdiveuq(<1 x i128>, <1 x i128>) nounwind readnone

define <1 x i128> @test_vdivesq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivesq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivesq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vdivesq(<1 x i128> %x, <1 x i128> %y)
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vdiveuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdiveuq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdiveuq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = call <1 x i128> @llvm.ppc.altivec.vdiveuq(<1 x i128> %x, <1 x i128> %y)
  ret <1 x i128> %tmp
}