; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s

; This test case aims to test the vector mask manipulation operations
; on Power10.

; Extract the mask bits from each vector element (vextract[bhwdq]m).
declare i32 @llvm.ppc.altivec.vextractbm(<16 x i8>)
declare i32 @llvm.ppc.altivec.vextracthm(<8 x i16>)
declare i32 @llvm.ppc.altivec.vextractwm(<4 x i32>)
declare i32 @llvm.ppc.altivec.vextractdm(<2 x i64>)
declare i32 @llvm.ppc.altivec.vextractqm(<1 x i128>)

define i32 @test_vextractbm(<16 x i8> %a) {
; CHECK-LABEL: test_vextractbm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vextractbm r3, v2
; CHECK-NEXT:    blr
entry:
  %ext = tail call i32 @llvm.ppc.altivec.vextractbm(<16 x i8> %a)
  ret i32 %ext
}

define i32 @test_vextracthm(<8 x i16> %a) {
; CHECK-LABEL: test_vextracthm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vextracthm r3, v2
; CHECK-NEXT:    blr
entry:
  %ext = tail call i32 @llvm.ppc.altivec.vextracthm(<8 x i16> %a)
  ret i32 %ext
}

define i32 @test_vextractwm(<4 x i32> %a) {
; CHECK-LABEL: test_vextractwm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vextractwm r3, v2
; CHECK-NEXT:    blr
entry:
  %ext = tail call i32 @llvm.ppc.altivec.vextractwm(<4 x i32> %a)
  ret i32 %ext
}

define i32 @test_vextractdm(<2 x i64> %a) {
; CHECK-LABEL: test_vextractdm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vextractdm r3, v2
; CHECK-NEXT:    blr
entry:
  %ext = tail call i32 @llvm.ppc.altivec.vextractdm(<2 x i64> %a)
  ret i32 %ext
}

define i32 @test_vextractqm(<1 x i128> %a) {
; CHECK-LABEL: test_vextractqm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vextractqm r3, v2
; CHECK-NEXT:    blr
entry:
  %ext = tail call i32 @llvm.ppc.altivec.vextractqm(<1 x i128> %a)
  ret i32 %ext
}

; Expand a bit mask into a full vector mask (vexpand[bhwdq]m).
declare <16 x i8> @llvm.ppc.altivec.vexpandbm(<16 x i8>)
declare <8 x i16> @llvm.ppc.altivec.vexpandhm(<8 x i16>)
declare <4 x i32> @llvm.ppc.altivec.vexpandwm(<4 x i32>)
declare <2 x i64> @llvm.ppc.altivec.vexpanddm(<2 x i64>)
declare <1 x i128> @llvm.ppc.altivec.vexpandqm(<1 x i128>)

define <16 x i8> @test_vexpandbm(<16 x i8> %a) {
; CHECK-LABEL: test_vexpandbm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexpandbm v2, v2
; CHECK-NEXT:    blr
entry:
  %exp = tail call <16 x i8> @llvm.ppc.altivec.vexpandbm(<16 x i8> %a)
  ret <16 x i8> %exp
}

define <8 x i16> @test_vexpandhm(<8 x i16> %a) {
; CHECK-LABEL: test_vexpandhm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexpandhm v2, v2
; CHECK-NEXT:    blr
entry:
  %exp = tail call <8 x i16> @llvm.ppc.altivec.vexpandhm(<8 x i16> %a)
  ret <8 x i16> %exp
}

define <4 x i32> @test_vexpandwm(<4 x i32> %a) {
; CHECK-LABEL: test_vexpandwm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexpandwm v2, v2
; CHECK-NEXT:    blr
entry:
  %exp = tail call <4 x i32> @llvm.ppc.altivec.vexpandwm(<4 x i32> %a)
  ret <4 x i32> %exp
}

define <2 x i64> @test_vexpanddm(<2 x i64> %a) {
; CHECK-LABEL: test_vexpanddm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexpanddm v2, v2
; CHECK-NEXT:    blr
entry:
  %exp = tail call <2 x i64> @llvm.ppc.altivec.vexpanddm(<2 x i64> %a)
  ret <2 x i64> %exp
}

define <1 x i128> @test_vexpandqm(<1 x i128> %a) {
; CHECK-LABEL: test_vexpandqm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vexpandqm v2, v2
; CHECK-NEXT:    blr
entry:
  %exp = tail call <1 x i128> @llvm.ppc.altivec.vexpandqm(<1 x i128> %a)
  ret <1 x i128> %exp
}

; Count mask bits (vcntmb[bhwd]); the i32 operand is the MP immediate.
declare i64 @llvm.ppc.altivec.vcntmbb(<16 x i8>, i32)
declare i64 @llvm.ppc.altivec.vcntmbh(<8 x i16>, i32)
declare i64 @llvm.ppc.altivec.vcntmbw(<4 x i32>, i32)
declare i64 @llvm.ppc.altivec.vcntmbd(<2 x i64>, i32)

define i64 @test_vcntmbb(<16 x i8> %a) {
; CHECK-LABEL: test_vcntmbb:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vcntmbb r3, v2, 1
; CHECK-NEXT:    blr
entry:
  %cnt = tail call i64 @llvm.ppc.altivec.vcntmbb(<16 x i8> %a, i32 1)
  ret i64 %cnt
}

define i64 @test_vcntmbh(<8 x i16> %a) {
; CHECK-LABEL: test_vcntmbh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vcntmbh r3, v2, 0
; CHECK-NEXT:    blr
entry:
  %cnt = tail call i64 @llvm.ppc.altivec.vcntmbh(<8 x i16> %a, i32 0)
  ret i64 %cnt
}

define i64 @test_vcntmbw(<4 x i32> %a) {
; CHECK-LABEL: test_vcntmbw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vcntmbw r3, v2, 1
; CHECK-NEXT:    blr
entry:
  %cnt = tail call i64 @llvm.ppc.altivec.vcntmbw(<4 x i32> %a, i32 1)
  ret i64 %cnt
}

define i64 @test_vcntmbd(<2 x i64> %a) {
; CHECK-LABEL: test_vcntmbd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vcntmbd r3, v2, 0
; CHECK-NEXT:    blr
entry:
  %cnt = tail call i64 @llvm.ppc.altivec.vcntmbd(<2 x i64> %a, i32 0)
  ret i64 %cnt
}

; Move a GPR bit mask to a vector mask (mtvsr[bhwdq]m). Small constant
; masks are expected to fold to the immediate form (mtvsrbmi).
declare <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64)
declare <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64)
declare <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64)
declare <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64)
declare <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64)

define <16 x i8> @test_mtvsrbm(i64 %a) {
; CHECK-LABEL: test_mtvsrbm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrbm v2, r3
; CHECK-NEXT:    blr
entry:
  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 %a)
  ret <16 x i8> %mv
}

define <16 x i8> @test_mtvsrbmi() {
; CHECK-LABEL: test_mtvsrbmi:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrbmi v2, 1
; CHECK-NEXT:    blr
entry:
  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 1)
  ret <16 x i8> %mv
}

define <16 x i8> @test_mtvsrbmi2() {
; CHECK-LABEL: test_mtvsrbmi2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrbmi v2, 255
; CHECK-NEXT:    blr
entry:
  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 255)
  ret <16 x i8> %mv
}

define <16 x i8> @test_mtvsrbmi3() {
; CHECK-LABEL: test_mtvsrbmi3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrbmi v2, 65535
; CHECK-NEXT:    blr
entry:
  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65535)
  ret <16 x i8> %mv
}

define <16 x i8> @test_mtvsrbmi4() {
; CHECK-LABEL: test_mtvsrbmi4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrbmi v2, 0
; CHECK-NEXT:    blr
entry:
  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65536)
  ret <16 x i8> %mv
}

define <16 x i8> @test_mtvsrbmi5() {
; CHECK-LABEL: test_mtvsrbmi5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrbmi v2, 10
; CHECK-NEXT:    blr
entry:
  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65546)
  ret <16 x i8> %mv
}

define <8 x i16> @test_mtvsrhm(i64 %a) {
; CHECK-LABEL: test_mtvsrhm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrhm v2, r3
; CHECK-NEXT:    blr
entry:
  %mv = tail call <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64 %a)
  ret <8 x i16> %mv
}

define <4 x i32> @test_mtvsrwm(i64 %a) {
; CHECK-LABEL: test_mtvsrwm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrwm v2, r3
; CHECK-NEXT:    blr
entry:
  %mv = tail call <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64 %a)
  ret <4 x i32> %mv
}

define <2 x i64> @test_mtvsrdm(i64 %a) {
; CHECK-LABEL: test_mtvsrdm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrdm v2, r3
; CHECK-NEXT:    blr
entry:
  %mv = tail call <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64 %a)
  ret <2 x i64> %mv
}

define <1 x i128> @test_mtvsrqm(i64 %a) {
; CHECK-LABEL: test_mtvsrqm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtvsrqm v2, r3
; CHECK-NEXT:    blr
entry:
  %mv = tail call <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64 %a)
  ret <1 x i128> %mv
}