; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-64

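; The +avx512f and +avx512f,+avx512bw runs share check prefixes, so identical
; codegen is expected with and without AVX512BW for these build vectors.

; Build a <8 x double> from scalar arguments. On i686 all eight doubles are
; passed on the stack, so the vector is materialized with a single vmovups;
; on x86_64 they arrive in %xmm0-%xmm7 and are combined with vmovlhps,
; vinsertf128 and vinsertf64x4.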
define <8 x double> @test_buildvector_v8f64(double %a0, double %a1, double %a2, double %a3, double %a4, double %a5, double %a6, double %a7) {
; AVX-32-LABEL: test_buildvector_v8f64:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v8f64:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX-64-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; AVX-64-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm4
; AVX-64-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX-64-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX-64-NEXT:    vinsertf64x4 $1, %ymm4, %zmm0, %zmm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <8 x double> undef, double %a0, i32 0
  %ins1 = insertelement <8 x double> %ins0, double %a1, i32 1
  %ins2 = insertelement <8 x double> %ins1, double %a2, i32 2
  %ins3 = insertelement <8 x double> %ins2, double %a3, i32 3
  %ins4 = insertelement <8 x double> %ins3, double %a4, i32 4
  %ins5 = insertelement <8 x double> %ins4, double %a5, i32 5
  %ins6 = insertelement <8 x double> %ins5, double %a6, i32 6
  %ins7 = insertelement <8 x double> %ins6, double %a7, i32 7
  ret <8 x double> %ins7
}

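; Build a <16 x float> from scalar arguments. i686 again loads the whole
; vector straight off the stack; x86_64 assembles the first eight floats from
; the %xmm0-%xmm7 argument registers with vinsertps and the remaining eight
; from stack memory before widening with vinsertf128/vinsertf64x4.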
define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15) {
; AVX-32-LABEL: test_buildvector_v16f32:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v16f32:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
; AVX-64-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX-64-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
; AVX-64-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
; AVX-64-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <16 x float> undef, float %a0, i32 0
  %ins1 = insertelement <16 x float> %ins0, float %a1, i32 1
  %ins2 = insertelement <16 x float> %ins1, float %a2, i32 2
  %ins3 = insertelement <16 x float> %ins2, float %a3, i32 3
  %ins4 = insertelement <16 x float> %ins3, float %a4, i32 4
  %ins5 = insertelement <16 x float> %ins4, float %a5, i32 5
  %ins6 = insertelement <16 x float> %ins5, float %a6, i32 6
  %ins7 = insertelement <16 x float> %ins6, float %a7, i32 7
  %ins8 = insertelement <16 x float> %ins7, float %a8, i32 8
  %ins9 = insertelement <16 x float> %ins8, float %a9, i32 9
  %ins10 = insertelement <16 x float> %ins9, float %a10, i32 10
  %ins11 = insertelement <16 x float> %ins10, float %a11, i32 11
  %ins12 = insertelement <16 x float> %ins11, float %a12, i32 12
  %ins13 = insertelement <16 x float> %ins12, float %a13, i32 13
  %ins14 = insertelement <16 x float> %ins13, float %a14, i32 14
  %ins15 = insertelement <16 x float> %ins14, float %a15, i32 15
  ret <16 x float> %ins15
}

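; Build a <8 x i64> from scalar arguments. On x86_64 the first six i64s arrive
; in GPRs and are moved into vector registers with vmovq/vpunpcklqdq; the last
; two are inserted as a pair directly from the stack.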
define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) {
; AVX-32-LABEL: test_buildvector_v8i64:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v8i64:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vmovq %rcx, %xmm0
; AVX-64-NEXT:    vmovq %rdx, %xmm1
; AVX-64-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-64-NEXT:    vmovq %rsi, %xmm1
; AVX-64-NEXT:    vmovq %rdi, %xmm2
; AVX-64-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-64-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX-64-NEXT:    vmovq %r9, %xmm1
; AVX-64-NEXT:    vmovq %r8, %xmm2
; AVX-64-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-64-NEXT:    vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1
; AVX-64-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <8 x i64> undef, i64 %a0, i32 0
  %ins1 = insertelement <8 x i64> %ins0, i64 %a1, i32 1
  %ins2 = insertelement <8 x i64> %ins1, i64 %a2, i32 2
  %ins3 = insertelement <8 x i64> %ins2, i64 %a3, i32 3
  %ins4 = insertelement <8 x i64> %ins3, i64 %a4, i32 4
  %ins5 = insertelement <8 x i64> %ins4, i64 %a5, i32 5
  %ins6 = insertelement <8 x i64> %ins5, i64 %a6, i32 6
  %ins7 = insertelement <8 x i64> %ins6, i64 %a7, i32 7
  ret <8 x i64> %ins7
}

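; Build a <16 x i32> from scalar arguments. x86_64 fills the low lane from the
; six GPR arguments with vmovd/vpinsrd and the remaining lanes from the stack
; before widening with vinserti128/vinserti64x4.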
define <16 x i32> @test_buildvector_v16i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) {
; AVX-32-LABEL: test_buildvector_v16i32:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v16i32:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vmovd %edi, %xmm0
; AVX-64-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
; AVX-64-NEXT:    vmovd %r8d, %xmm1
; AVX-64-NEXT:    vpinsrd $1, %r9d, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX-64-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrd $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrd $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrd $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrd $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX-64-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <16 x i32> undef, i32 %a0, i32 0
  %ins1 = insertelement <16 x i32> %ins0, i32 %a1, i32 1
  %ins2 = insertelement <16 x i32> %ins1, i32 %a2, i32 2
  %ins3 = insertelement <16 x i32> %ins2, i32 %a3, i32 3
  %ins4 = insertelement <16 x i32> %ins3, i32 %a4, i32 4
  %ins5 = insertelement <16 x i32> %ins4, i32 %a5, i32 5
  %ins6 = insertelement <16 x i32> %ins5, i32 %a6, i32 6
  %ins7 = insertelement <16 x i32> %ins6, i32 %a7, i32 7
  %ins8 = insertelement <16 x i32> %ins7, i32 %a8, i32 8
  %ins9 = insertelement <16 x i32> %ins8, i32 %a9, i32 9
  %ins10 = insertelement <16 x i32> %ins9, i32 %a10, i32 10
  %ins11 = insertelement <16 x i32> %ins10, i32 %a11, i32 11
  %ins12 = insertelement <16 x i32> %ins11, i32 %a12, i32 12
  %ins13 = insertelement <16 x i32> %ins12, i32 %a13, i32 13
  %ins14 = insertelement <16 x i32> %ins13, i32 %a14, i32 14
  %ins15 = insertelement <16 x i32> %ins14, i32 %a15, i32 15
  ret <16 x i32> %ins15
}

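; Build a <32 x i16> from scalar arguments. There is no vmovups shortcut for
; sub-dword elements: both targets build each 128-bit lane element by element
; with vpinsrw, then widen with vinserti128/vinserti64x4. On x86_64 the first
; six i16s come in via GPRs, the rest via the stack.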
define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15, i16 %a16, i16 %a17, i16 %a18, i16 %a19, i16 %a20, i16 %a21, i16 %a22, i16 %a23, i16 %a24, i16 %a25, i16 %a26, i16 %a27, i16 %a28, i16 %a29, i16 %a30, i16 %a31) {
; AVX-32-LABEL: test_buildvector_v32i16:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrw $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrw $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrw $4, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrw $6, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrw $7, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX-32-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v32i16:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrw $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrw $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrw $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX-64-NEXT:    vmovd %edi, %xmm1
; AVX-64-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $2, %edx, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $3, %ecx, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $4, %r8d, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $5, %r9d, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrw $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrw $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrw $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrw $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrw $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX-64-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <32 x i16> undef, i16 %a0, i32 0
  %ins1 = insertelement <32 x i16> %ins0, i16 %a1, i32 1
  %ins2 = insertelement <32 x i16> %ins1, i16 %a2, i32 2
  %ins3 = insertelement <32 x i16> %ins2, i16 %a3, i32 3
  %ins4 = insertelement <32 x i16> %ins3, i16 %a4, i32 4
  %ins5 = insertelement <32 x i16> %ins4, i16 %a5, i32 5
  %ins6 = insertelement <32 x i16> %ins5, i16 %a6, i32 6
  %ins7 = insertelement <32 x i16> %ins6, i16 %a7, i32 7
  %ins8 = insertelement <32 x i16> %ins7, i16 %a8, i32 8
  %ins9 = insertelement <32 x i16> %ins8, i16 %a9, i32 9
  %ins10 = insertelement <32 x i16> %ins9, i16 %a10, i32 10
  %ins11 = insertelement <32 x i16> %ins10, i16 %a11, i32 11
  %ins12 = insertelement <32 x i16> %ins11, i16 %a12, i32 12
  %ins13 = insertelement <32 x i16> %ins12, i16 %a13, i32 13
  %ins14 = insertelement <32 x i16> %ins13, i16 %a14, i32 14
  %ins15 = insertelement <32 x i16> %ins14, i16 %a15, i32 15
  %ins16 = insertelement <32 x i16> %ins15, i16 %a16, i32 16
  %ins17 = insertelement <32 x i16> %ins16, i16 %a17, i32 17
  %ins18 = insertelement <32 x i16> %ins17, i16 %a18, i32 18
  %ins19 = insertelement <32 x i16> %ins18, i16 %a19, i32 19
  %ins20 = insertelement <32 x i16> %ins19, i16 %a20, i32 20
  %ins21 = insertelement <32 x i16> %ins20, i16 %a21, i32 21
  %ins22 = insertelement <32 x i16> %ins21, i16 %a22, i32 22
  %ins23 = insertelement <32 x i16> %ins22, i16 %a23, i32 23
  %ins24 = insertelement <32 x i16> %ins23, i16 %a24, i32 24
  %ins25 = insertelement <32 x i16> %ins24, i16 %a25, i32 25
  %ins26 = insertelement <32 x i16> %ins25, i16 %a26, i32 26
  %ins27 = insertelement <32 x i16> %ins26, i16 %a27, i32 27
  %ins28 = insertelement <32 x i16> %ins27, i16 %a28, i32 28
  %ins29 = insertelement <32 x i16> %ins28, i16 %a29, i32 29
  %ins30 = insertelement <32 x i16> %ins29, i16 %a30, i32 30
  %ins31 = insertelement <32 x i16> %ins30, i16 %a31, i32 31
  ret <32 x i16> %ins31
}

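; Build a <64 x i8> from scalar arguments: one vpinsrb per element within each
; 128-bit lane, then widening with vinserti128/vinserti64x4, mirroring the
; <32 x i16> case above.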
define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31, i8 %a32, i8 %a33, i8 %a34, i8 %a35, i8 %a36, i8 %a37, i8 %a38, i8 %a39, i8 %a40, i8 %a41, i8 %a42, i8 %a43, i8 %a44, i8 %a45, i8 %a46, i8 %a47, i8 %a48, i8 %a49, i8 %a50, i8 %a51, i8 %a52, i8 %a53, i8 %a54, i8 %a55, i8 %a56, i8 %a57, i8 %a58, i8 %a59, i8 %a60, i8 %a61, i8 %a62, i8 %a63) {
; AVX-32-LABEL: test_buildvector_v64i8:
; AVX-32:       # %bb.0:
; AVX-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $4, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $5, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $6, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $7, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $8, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $9, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $10, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $11, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $12, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $13, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $14, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vpinsrb $15, {{[0-9]+}}(%esp), %xmm2, %xmm2
; AVX-32-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX-32-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX-32-NEXT:    retl
;
; AVX-64-LABEL: test_buildvector_v64i8:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX-64-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX-64-NEXT:    vmovd %edi, %xmm1
; AVX-64-NEXT:    vpinsrb $1, %esi, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $2, %edx, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $3, %ecx, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $4, %r8d, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $5, %r9d, %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; AVX-64-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-64-NEXT:    vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2
; AVX-64-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX-64-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX-64-NEXT:    retq
  %ins0 = insertelement <64 x i8> undef, i8 %a0, i32 0
  %ins1 = insertelement <64 x i8> %ins0, i8 %a1, i32 1
  %ins2 = insertelement <64 x i8> %ins1, i8 %a2, i32 2
  %ins3 = insertelement <64 x i8> %ins2, i8 %a3, i32 3
  %ins4 = insertelement <64 x i8> %ins3, i8 %a4, i32 4
  %ins5 = insertelement <64 x i8> %ins4, i8 %a5, i32 5
  %ins6 = insertelement <64 x i8> %ins5, i8 %a6, i32 6
  %ins7 = insertelement <64 x i8> %ins6, i8 %a7, i32 7
  %ins8 = insertelement <64 x i8> %ins7, i8 %a8, i32 8
  %ins9 = insertelement <64 x i8> %ins8, i8 %a9, i32 9
  %ins10 = insertelement <64 x i8> %ins9, i8 %a10, i32 10
  %ins11 = insertelement <64 x i8> %ins10, i8 %a11, i32 11
  %ins12 = insertelement <64 x i8> %ins11, i8 %a12, i32 12
  %ins13 = insertelement <64 x i8> %ins12, i8 %a13, i32 13
  %ins14 = insertelement <64 x i8> %ins13, i8 %a14, i32 14
  %ins15 = insertelement <64 x i8> %ins14, i8 %a15, i32 15
  %ins16 = insertelement <64 x i8> %ins15, i8 %a16, i32 16
  %ins17 = insertelement <64 x i8> %ins16, i8 %a17, i32 17
  %ins18 = insertelement <64 x i8> %ins17, i8 %a18, i32 18
  %ins19 = insertelement <64 x i8> %ins18, i8 %a19, i32 19
  %ins20 = insertelement <64 x i8> %ins19, i8 %a20, i32 20
  %ins21 = insertelement <64 x i8> %ins20, i8 %a21, i32 21
  %ins22 = insertelement <64 x i8> %ins21, i8 %a22, i32 22
  %ins23 = insertelement <64 x i8> %ins22, i8 %a23, i32 23
  %ins24 = insertelement <64 x i8> %ins23, i8 %a24, i32 24
  %ins25 = insertelement <64 x i8> %ins24, i8 %a25, i32 25
  %ins26 = insertelement <64 x i8> %ins25, i8 %a26, i32 26
  %ins27 = insertelement <64 x i8> %ins26, i8 %a27, i32 27
  %ins28 = insertelement <64 x i8> %ins27, i8 %a28, i32 28
  %ins29 = insertelement <64 x i8> %ins28, i8 %a29, i32 29
  %ins30 = insertelement <64 x i8> %ins29, i8 %a30, i32 30
  %ins31 = insertelement <64 x i8> %ins30, i8 %a31, i32 31
  %ins32 = insertelement <64 x i8> %ins31, i8 %a32, i32 32
  %ins33 = insertelement <64 x i8> %ins32, i8 %a33, i32 33
  %ins34 = insertelement <64 x i8> %ins33, i8 %a34, i32 34
  %ins35 = insertelement <64 x i8> %ins34, i8 %a35, i32 35
  %ins36 = insertelement <64 x i8> %ins35, i8 %a36, i32 36
  %ins37 = insertelement <64 x i8> %ins36, i8 %a37, i32 37
  %ins38 = insertelement <64 x i8> %ins37, i8 %a38, i32 38
  %ins39 = insertelement <64 x i8> %ins38, i8 %a39, i32 39
  %ins40 = insertelement <64 x i8> %ins39, i8 %a40, i32 40
  %ins41 = insertelement <64 x i8> %ins40, i8 %a41, i32 41
  %ins42 = insertelement <64 x i8> %ins41, i8 %a42, i32 42
  %ins43 = insertelement <64 x i8> %ins42, i8 %a43, i32 43
  %ins44 = insertelement <64 x i8> %ins43, i8 %a44, i32 44
  %ins45 = insertelement <64 x i8> %ins44, i8 %a45, i32 45
  %ins46 = insertelement <64 x i8> %ins45, i8 %a46, i32 46
  %ins47 = insertelement <64 x i8> %ins46, i8 %a47, i32 47
  %ins48 = insertelement <64 x i8> %ins47, i8 %a48, i32 48
  %ins49 = insertelement <64 x i8> %ins48, i8 %a49, i32 49
  %ins50 = insertelement <64 x i8> %ins49, i8 %a50, i32 50
  %ins51 = insertelement <64 x i8> %ins50, i8 %a51, i32 51
  %ins52 = insertelement <64 x i8> %ins51, i8 %a52, i32 52
  %ins53 = insertelement <64 x i8> %ins52, i8 %a53, i32 53
  %ins54 = insertelement <64 x i8> %ins53, i8 %a54, i32 54
  %ins55 = insertelement <64 x i8> %ins54, i8 %a55, i32 55
  %ins56 = insertelement <64 x i8> %ins55, i8 %a56, i32 56
  %ins57 = insertelement <64 x i8> %ins56, i8 %a57, i32 57
  %ins58 = insertelement <64 x i8> %ins57, i8 %a58, i32 58
  %ins59 = insertelement <64 x i8> %ins58, i8 %a59, i32 59
  %ins60 = insertelement <64 x i8> %ins59, i8 %a60, i32 60
  %ins61 = insertelement <64 x i8> %ins60, i8 %a61, i32 61
  %ins62 = insertelement <64 x i8> %ins61, i8 %a62, i32 62
  %ins63 = insertelement <64 x i8> %ins62, i8 %a63, i32 63
  ret <64 x i8> %ins63
}