; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=KNL
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=SKX

; Each function below loads <8 x i8>, widens it, adds 4 to every lane, and
; truncates the result back to <8 x i8> before storing it. Only the low byte
; of each lane is live across the truncation, so the extending load can be
; treated as an any-extend (hence the test names); the xor with
; zeroinitializer is a no-op.

define void @any_extend_load_v8i64(<8 x i8> * %ptr) {
; ALL-LABEL: any_extend_load_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; ALL-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
; ALL-NEXT:    vpmovqb %zmm0, (%rdi)
; ALL-NEXT:    vzeroupper
; ALL-NEXT:    retq
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i64>
  %2 = add nuw nsw <8 x i64> %1, <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4>
  %3 = xor <8 x i64> %2, zeroinitializer
  %4 = trunc <8 x i64> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}

define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i32:
; KNL:       # %bb.0:
; KNL-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; KNL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4]
; KNL-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; KNL-NEXT:    vpmovdb %zmm0, %xmm0
; KNL-NEXT:    vmovq %xmm0, (%rdi)
; KNL-NEXT:    vzeroupper
; KNL-NEXT:    retq
;
; SKX-LABEL: any_extend_load_v8i32:
; SKX:       # %bb.0:
; SKX-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; SKX-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT:    vpmovdb %ymm0, (%rdi)
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i32>
  %2 = add nuw nsw <8 x i32> %1, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %3 = xor <8 x i32> %2, zeroinitializer
  %4 = trunc <8 x i32> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}

define void @any_extend_load_v8i16(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i16:
; KNL:       # %bb.0:
; KNL-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; KNL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT:    vmovq %xmm0, (%rdi)
; KNL-NEXT:    retq
;
; SKX-LABEL: any_extend_load_v8i16:
; SKX:       # %bb.0:
; SKX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SKX-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; SKX-NEXT:    vpmovwb %xmm0, (%rdi)
; SKX-NEXT:    retq
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i16>
  %2 = add nuw nsw <8 x i16> %1, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %3 = xor <8 x i16> %2, zeroinitializer
  %4 = trunc <8 x i16> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}