; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s

; 'e' is the load source (only 16-byte aligned, so a combined 32-byte load
; must be unaligned); 'd' is the store destination for the unaligned store.
@e = global [8 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8], align 16
@d = global [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 16

; The global 'e' has 16-byte alignment, so make sure we don't generate an
; aligned 32-byte load instruction when we combine the load+insert sequence.

; Load all eight i32 elements of @e individually, rebuild them into an
; <8 x i32> vector with insertelement, and store the result to @d with the
; unaligned-store intrinsic. The load+insert chain should be combined into a
; single 32-byte load, and because @e is only 16-byte aligned that combined
; load must be the unaligned vmovups, not vmovaps.
define i32 @subb() nounwind ssp {
; CHECK-LABEL: subb:
; CHECK:  vmovups e(%rip), %ymm
entry:
  %0 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 7), align 4
  %1 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 6), align 8
  %2 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 5), align 4
  %3 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 4), align 16
  %4 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 3), align 4
  %5 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 2), align 8
  %6 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 1), align 4
  %7 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 0), align 16
  %vecinit.i = insertelement <8 x i32> undef, i32 %7, i32 0
  %vecinit1.i = insertelement <8 x i32> %vecinit.i, i32 %6, i32 1
  %vecinit2.i = insertelement <8 x i32> %vecinit1.i, i32 %5, i32 2
  %vecinit3.i = insertelement <8 x i32> %vecinit2.i, i32 %4, i32 3
  %vecinit4.i = insertelement <8 x i32> %vecinit3.i, i32 %3, i32 4
  %vecinit5.i = insertelement <8 x i32> %vecinit4.i, i32 %2, i32 5
  %vecinit6.i = insertelement <8 x i32> %vecinit5.i, i32 %1, i32 6
  %vecinit7.i = insertelement <8 x i32> %vecinit6.i, i32 %0, i32 7
  %8 = bitcast <8 x i32> %vecinit7.i to <32 x i8>
  tail call void @llvm.x86.avx.storeu.dq.256(i8* bitcast ([8 x i32]* @d to i8*), <32 x i8> %8)
  ret i32 0
}
declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind