; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector shift right logical intrinsic instructions
;;;
;;; Note:
;;;   We test VSRL*vvl, VSRL*vvl_v, VSRL*vrl, VSRL*vrl_v, VSRL*vil, VSRL*vil_v,
;;;   VSRL*vvml_v, VSRL*vrml_v, VSRL*viml_v, PVSRL*vvl, PVSRL*vvl_v, PVSRL*vrl,
;;;   PVSRL*vrl_v, PVSRL*vvml_v, and PVSRL*vrml_v instructions.

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsrl_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsrl %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsrl.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vsrl_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsrl %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsrl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: vsrl_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsrl %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsl(<256 x double> %0, i64 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsrl.vvsl(<256 x double>, i64, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: vsrl_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsrl %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsrl.vvsvl(<256 x double>, i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vsrl_vvsl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsrl %v0, %v0, 8
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsl(<256 x double> %0, i64 8, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsrl_vvsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsrl %v1, %v0, 8
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsrl_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsrl %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsrl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsrl_vvsmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsrl %v1, %v0, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrl_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vsrl_vvsmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsrl %v1, %v0, 8, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsrl_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: pvsrl_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsrl %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsrl.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsrl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: pvsrl_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsrl %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsrl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsrl_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: pvsrl_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsrl %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvsl(<256 x double> %0, i64 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsrl.vvsl(<256 x double>, i64, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsrl_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: pvsrl_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsrl %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsrl.vvsvl(<256 x double>, i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsrl_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvsrl_vvvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsrl %v2, %v0, %v1, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsrl.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsrl_vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvsrl_vvsMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsrl %v1, %v0, %s0, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsrl.vvsMvl(<256 x double>, i64, <512 x i1>, <256 x double>, i32)