; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

; Tests for forming SLI/SRI (shift-and-insert) from (and X, Mask) | (shl/lshr Y, C).
; "Good" cases use a mask that exactly complements the shifted-in bits, so the
; DAG combiner can fold to a single sli/sri. "Bad" cases use a mask that does
; not, so the and/shift/orr sequence must be kept.

define void @testLeftGood8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
; CHECK-LABEL: testLeftGood8x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sli.8b v0, v1, #3
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i8> %src1, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %vshl_n = shl <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %result = or <8 x i8> %and.i, %vshl_n
  store <8 x i8> %result, <8 x i8>* %dest, align 8
  ret void
}

define void @testLeftBad8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
; CHECK-LABEL: testLeftBad8x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi.8b v2, #165
; CHECK-NEXT:    and.8b v0, v0, v2
; CHECK-NEXT:    shl.8b v1, v1, #1
; CHECK-NEXT:    orr.8b v0, v0, v1
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
  %vshl_n = shl <8 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %result = or <8 x i8> %and.i, %vshl_n
  store <8 x i8> %result, <8 x i8>* %dest, align 8
  ret void
}

define void @testRightGood8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
; CHECK-LABEL: testRightGood8x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sri.8b v0, v1, #3
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i8> %src1, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %vshl_n = lshr <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %result = or <8 x i8> %and.i, %vshl_n
  store <8 x i8> %result, <8 x i8>* %dest, align 8
  ret void
}

define void @testRightBad8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
; CHECK-LABEL: testRightBad8x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi.8b v2, #165
; CHECK-NEXT:    and.8b v0, v0, v2
; CHECK-NEXT:    ushr.8b v1, v1, #1
; CHECK-NEXT:    orr.8b v0, v0, v1
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
  %vshl_n = lshr <8 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %result = or <8 x i8> %and.i, %vshl_n
  store <8 x i8> %result, <8 x i8>* %dest, align 8
  ret void
}

define void @testLeftGood16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
; CHECK-LABEL: testLeftGood16x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sli.16b v0, v1, #3
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <16 x i8> %src1, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %vshl_n = shl <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %result = or <16 x i8> %and.i, %vshl_n
  store <16 x i8> %result, <16 x i8>* %dest, align 16
  ret void
}

define void @testLeftBad16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
; CHECK-LABEL: testLeftBad16x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi.16b v2, #165
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    shl.16b v1, v1, #1
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <16 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
  %vshl_n = shl <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %result = or <16 x i8> %and.i, %vshl_n
  store <16 x i8> %result, <16 x i8>* %dest, align 16
  ret void
}

define void @testRightGood16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
; CHECK-LABEL: testRightGood16x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sri.16b v0, v1, #3
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <16 x i8> %src1, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
  %vshl_n = lshr <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %result = or <16 x i8> %and.i, %vshl_n
  store <16 x i8> %result, <16 x i8>* %dest, align 16
  ret void
}

define void @testRightBad16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
; CHECK-LABEL: testRightBad16x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi.16b v2, #165
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    ushr.16b v1, v1, #1
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <16 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
  %vshl_n = lshr <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %result = or <16 x i8> %and.i, %vshl_n
  store <16 x i8> %result, <16 x i8>* %dest, align 16
  ret void
}

define void @testLeftGood4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
; CHECK-LABEL: testLeftGood4x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sli.4h v0, v1, #14
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i16> %src1, <i16 16383, i16 16383, i16 16383, i16 16383>
  %vshl_n = shl <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
  %result = or <4 x i16> %and.i, %vshl_n
  store <4 x i16> %result, <4 x i16>* %dest, align 8
  ret void
}

define void @testLeftBad4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
; CHECK-LABEL: testLeftBad4x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16500
; CHECK-NEXT:    dup.4h v2, w8
; CHECK-NEXT:    and.8b v0, v0, v2
; CHECK-NEXT:    shl.4h v1, v1, #14
; CHECK-NEXT:    orr.8b v0, v0, v1
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500>
  %vshl_n = shl <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
  %result = or <4 x i16> %and.i, %vshl_n
  store <4 x i16> %result, <4 x i16>* %dest, align 8
  ret void
}

define void @testRightGood4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
; CHECK-LABEL: testRightGood4x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sri.4h v0, v1, #14
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i16> %src1, <i16 65532, i16 65532, i16 65532, i16 65532>
  %vshl_n = lshr <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
  %result = or <4 x i16> %and.i, %vshl_n
  store <4 x i16> %result, <4 x i16>* %dest, align 8
  ret void
}

define void @testRightBad4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
; CHECK-LABEL: testRightBad4x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16500
; CHECK-NEXT:    dup.4h v2, w8
; CHECK-NEXT:    and.8b v0, v0, v2
; CHECK-NEXT:    ushr.4h v1, v1, #14
; CHECK-NEXT:    orr.8b v0, v0, v1
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500>
  %vshl_n = lshr <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
  %result = or <4 x i16> %and.i, %vshl_n
  store <4 x i16> %result, <4 x i16>* %dest, align 8
  ret void
}

define void @testLeftGood8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
; CHECK-LABEL: testLeftGood8x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sli.8h v0, v1, #14
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i16> %src1, <i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383>
  %vshl_n = shl <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
  %result = or <8 x i16> %and.i, %vshl_n
  store <8 x i16> %result, <8 x i16>* %dest, align 16
  ret void
}

define void @testLeftBad8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
; CHECK-LABEL: testLeftBad8x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16500
; CHECK-NEXT:    dup.8h v2, w8
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    shl.8h v1, v1, #14
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500>
  %vshl_n = shl <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
  %result = or <8 x i16> %and.i, %vshl_n
  store <8 x i16> %result, <8 x i16>* %dest, align 16
  ret void
}

define void @testRightGood8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
; CHECK-LABEL: testRightGood8x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sri.8h v0, v1, #14
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i16> %src1, <i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532>
  %vshl_n = lshr <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
  %result = or <8 x i16> %and.i, %vshl_n
  store <8 x i16> %result, <8 x i16>* %dest, align 16
  ret void
}

define void @testRightBad8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
; CHECK-LABEL: testRightBad8x16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16500
; CHECK-NEXT:    dup.8h v2, w8
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    ushr.8h v1, v1, #14
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500>
  %vshl_n = lshr <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
  %result = or <8 x i16> %and.i, %vshl_n
  store <8 x i16> %result, <8 x i16>* %dest, align 16
  ret void
}

define void @testLeftGood2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
; CHECK-LABEL: testLeftGood2x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sli.2s v0, v1, #22
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i32> %src1, <i32 4194303, i32 4194303>
  %vshl_n = shl <2 x i32> %src2, <i32 22, i32 22>
  %result = or <2 x i32> %and.i, %vshl_n
  store <2 x i32> %result, <2 x i32>* %dest, align 8
  ret void
}

define void @testLeftBad2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
; CHECK-LABEL: testLeftBad2x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #4194300
; CHECK-NEXT:    dup.2s v2, w8
; CHECK-NEXT:    and.8b v0, v0, v2
; CHECK-NEXT:    shl.2s v1, v1, #22
; CHECK-NEXT:    orr.8b v0, v0, v1
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i32> %src1, <i32 4194300, i32 4194300>
  %vshl_n = shl <2 x i32> %src2, <i32 22, i32 22>
  %result = or <2 x i32> %and.i, %vshl_n
  store <2 x i32> %result, <2 x i32>* %dest, align 8
  ret void
}

define void @testRightGood2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
; CHECK-LABEL: testRightGood2x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sri.2s v0, v1, #22
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i32> %src1, <i32 4294966272, i32 4294966272>
  %vshl_n = lshr <2 x i32> %src2, <i32 22, i32 22>
  %result = or <2 x i32> %and.i, %vshl_n
  store <2 x i32> %result, <2 x i32>* %dest, align 8
  ret void
}

define void @testRightBad2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
; CHECK-LABEL: testRightBad2x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #4194300
; CHECK-NEXT:    dup.2s v2, w8
; CHECK-NEXT:    and.8b v0, v0, v2
; CHECK-NEXT:    ushr.2s v1, v1, #22
; CHECK-NEXT:    orr.8b v0, v0, v1
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i32> %src1, <i32 4194300, i32 4194300>
  %vshl_n = lshr <2 x i32> %src2, <i32 22, i32 22>
  %result = or <2 x i32> %and.i, %vshl_n
  store <2 x i32> %result, <2 x i32>* %dest, align 8
  ret void
}

define void @testLeftGood4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
; CHECK-LABEL: testLeftGood4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sli.4s v0, v1, #22
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i32> %src1, <i32 4194303, i32 4194303, i32 4194303, i32 4194303>
  %vshl_n = shl <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
  %result = or <4 x i32> %and.i, %vshl_n
  store <4 x i32> %result, <4 x i32>* %dest, align 16
  ret void
}

define void @testLeftBad4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
; CHECK-LABEL: testLeftBad4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #4194300
; CHECK-NEXT:    dup.4s v2, w8
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    shl.4s v1, v1, #22
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i32> %src1, <i32 4194300, i32 4194300, i32 4194300, i32 4194300>
  %vshl_n = shl <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
  %result = or <4 x i32> %and.i, %vshl_n
  store <4 x i32> %result, <4 x i32>* %dest, align 16
  ret void
}

define void @testRightGood4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
; CHECK-LABEL: testRightGood4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sri.4s v0, v1, #22
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i32> %src1, <i32 4294966272, i32 4294966272, i32 4294966272, i32 4294966272>
  %vshl_n = lshr <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
  %result = or <4 x i32> %and.i, %vshl_n
  store <4 x i32> %result, <4 x i32>* %dest, align 16
  ret void
}

define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
; CHECK-LABEL: testRightBad4x32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #4194300
; CHECK-NEXT:    dup.4s v2, w8
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    ushr.4s v1, v1, #22
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <4 x i32> %src1, <i32 4194300, i32 4194300, i32 4194300, i32 4194300>
  %vshl_n = lshr <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
  %result = or <4 x i32> %and.i, %vshl_n
  store <4 x i32> %result, <4 x i32>* %dest, align 16
  ret void
}

define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
; CHECK-LABEL: testLeftGood2x64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sli.2d v0, v1, #48
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i64> %src1, <i64 281474976710655, i64 281474976710655>
  %vshl_n = shl <2 x i64> %src2, <i64 48, i64 48>
  %result = or <2 x i64> %and.i, %vshl_n
  store <2 x i64> %result, <2 x i64>* %dest, align 16
  ret void
}

define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
; CHECK-LABEL: testLeftBad2x64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #10
; CHECK-NEXT:    movk x8, #1, lsl #48
; CHECK-NEXT:    dup.2d v2, x8
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    shl.2d v1, v1, #48
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i64> %src1, <i64 281474976710666, i64 281474976710666>
  %vshl_n = shl <2 x i64> %src2, <i64 48, i64 48>
  %result = or <2 x i64> %and.i, %vshl_n
  store <2 x i64> %result, <2 x i64>* %dest, align 16
  ret void
}

define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
; CHECK-LABEL: testRightGood2x64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sri.2d v0, v1, #48
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i64> %src1, <i64 18446744073709486080, i64 18446744073709486080>
  %vshl_n = lshr <2 x i64> %src2, <i64 48, i64 48>
  %result = or <2 x i64> %and.i, %vshl_n
  store <2 x i64> %result, <2 x i64>* %dest, align 16
  ret void
}

define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
; CHECK-LABEL: testRightBad2x64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #10
; CHECK-NEXT:    movk x8, #1, lsl #48
; CHECK-NEXT:    dup.2d v2, x8
; CHECK-NEXT:    and.16b v0, v0, v2
; CHECK-NEXT:    ushr.2d v1, v1, #48
; CHECK-NEXT:    orr.16b v0, v0, v1
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <2 x i64> %src1, <i64 281474976710666, i64 281474976710666>
  %vshl_n = lshr <2 x i64> %src2, <i64 48, i64 48>
  %result = or <2 x i64> %and.i, %vshl_n
  store <2 x i64> %result, <2 x i64>* %dest, align 16
  ret void
}

define void @testLeftShouldNotCreateSLI1x128(<1 x i128> %src1, <1 x i128> %src2, <1 x i128>* %dest) nounwind {
; CHECK-LABEL: testLeftShouldNotCreateSLI1x128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bfi x1, x2, #6, #58
; CHECK-NEXT:    stp x0, x1, [x4]
; CHECK-NEXT:    ret
  %and.i = and <1 x i128> %src1, <i128 1180591620717411303423>
  %vshl_n = shl <1 x i128> %src2, <i128 70>
  %result = or <1 x i128> %and.i, %vshl_n
  store <1 x i128> %result, <1 x i128>* %dest, align 16
  ret void
}

define void @testLeftNotAllConstantBuildVec8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
; CHECK-LABEL: testLeftNotAllConstantBuildVec8x8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, .LCPI29_0
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI29_0]
; CHECK-NEXT:    shl.8b v1, v1, #3
; CHECK-NEXT:    and.8b v0, v0, v2
; CHECK-NEXT:    orr.8b v0, v0, v1
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
  %and.i = and <8 x i8> %src1, <i8 7, i8 7, i8 255, i8 7, i8 7, i8 7, i8 255, i8 7>
  %vshl_n = shl <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %result = or <8 x i8> %and.i, %vshl_n
  store <8 x i8> %result, <8 x i8>* %dest, align 8
  ret void
}