; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-- -mcpu=corei7 | FileCheck %s
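;
; Check that the SLP vectorizer builds vector operations out of two independent
; scalar chains of different element widths: the i32 chain (elements 4..7)
; becomes one <4 x i32> load/add/store group, and the i64 chain (elements
; 8..11) becomes two <2 x i64> load/add/store groups.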

define void @foo(i8* %v0, i8* readonly %v1) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:    [[T0:%.*]] = bitcast i8* [[V0:%.*]] to i32*
; CHECK-NEXT:    [[T1:%.*]] = bitcast i8* [[V1:%.*]] to i32*
; CHECK-NEXT:    [[T02:%.*]] = bitcast i8* [[V0]] to i64*
; CHECK-NEXT:    [[T12:%.*]] = bitcast i8* [[V1]] to i64*
; CHECK-NEXT:    [[T14:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 4
; CHECK-NEXT:    [[T18:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 5
; CHECK-NEXT:    [[T22:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
; CHECK-NEXT:    [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 7
; CHECK-NEXT:    [[T142:%.*]] = getelementptr inbounds i64, i64* [[T12]], i64 8
; CHECK-NEXT:    [[T182:%.*]] = getelementptr inbounds i64, i64* [[T12]], i64 9
; CHECK-NEXT:    [[T222:%.*]] = getelementptr inbounds i64, i64* [[T12]], i64 10
; CHECK-NEXT:    [[T262:%.*]] = getelementptr inbounds i64, i64* [[T12]], i64 11
; CHECK-NEXT:    [[T21:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 4
; CHECK-NEXT:    [[T25:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 5
; CHECK-NEXT:    [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 6
; CHECK-NEXT:    [[T32:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
; CHECK-NEXT:    [[T212:%.*]] = getelementptr inbounds i64, i64* [[T02]], i64 8
; CHECK-NEXT:    [[T252:%.*]] = getelementptr inbounds i64, i64* [[T02]], i64 9
; CHECK-NEXT:    [[T292:%.*]] = getelementptr inbounds i64, i64* [[T02]], i64 10
; CHECK-NEXT:    [[T322:%.*]] = getelementptr inbounds i64, i64* [[T02]], i64 11
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[T14]] to <4 x i32>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64* [[T142]] to <2 x i64>*
; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* [[TMP3]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i64* [[T222]] to <2 x i64>*
; CHECK-NEXT:    [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* [[TMP5]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> [[TMP2]], <i32 4, i32 4, i32 6, i32 7>
; CHECK-NEXT:    [[TMP8:%.*]] = add nsw <2 x i64> [[TMP4]], <i64 4, i64 4>
; CHECK-NEXT:    [[TMP9:%.*]] = add nsw <2 x i64> [[TMP6]], <i64 6, i64 7>
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i64* [[T212]] to <2 x i64>*
; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64>* [[TMP10]], align 8
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i64* [[T292]] to <2 x i64>*
; CHECK-NEXT:    store <2 x i64> [[TMP9]], <2 x i64>* [[TMP11]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i32* [[T21]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP12]], align 4
; CHECK-NEXT:    ret void
;
  %t0 = bitcast i8* %v0 to i32*
  %t1 = bitcast i8* %v1 to i32*

  %t02 = bitcast i8* %v0 to i64*
  %t12 = bitcast i8* %v1 to i64*

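  ; Addresses of i32 elements 4..7 and i64 elements 8..11, for both the source
  ; (%v1) and the destination (%v0).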
  %t14 = getelementptr inbounds i32, i32* %t1, i64 4
  %t18 = getelementptr inbounds i32, i32* %t1, i64 5
  %t22 = getelementptr inbounds i32, i32* %t1, i64 6
  %t26 = getelementptr inbounds i32, i32* %t1, i64 7

  %t142 = getelementptr inbounds i64, i64* %t12, i64 8
  %t182 = getelementptr inbounds i64, i64* %t12, i64 9
  %t222 = getelementptr inbounds i64, i64* %t12, i64 10
  %t262 = getelementptr inbounds i64, i64* %t12, i64 11

  %t21 = getelementptr inbounds i32, i32* %t0, i64 4
  %t25 = getelementptr inbounds i32, i32* %t0, i64 5
  %t29 = getelementptr inbounds i32, i32* %t0, i64 6
  %t32 = getelementptr inbounds i32, i32* %t0, i64 7

  %t212 = getelementptr inbounds i64, i64* %t02, i64 8
  %t252 = getelementptr inbounds i64, i64* %t02, i64 9
  %t292 = getelementptr inbounds i64, i64* %t02, i64 10
  %t322 = getelementptr inbounds i64, i64* %t02, i64 11

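  ; Four consecutive i32 loads and four consecutive i64 loads from %v1; the
  ; i32 group becomes one <4 x i32> load, the i64 group two <2 x i64> loads.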
  %t19 = load i32, i32* %t14, align 4
  %t23 = load i32, i32* %t18, align 4
  %t27 = load i32, i32* %t22, align 4
  %t30 = load i32, i32* %t26, align 4

  %t192 = load i64, i64* %t142, align 8
  %t232 = load i64, i64* %t182, align 8
  %t272 = load i64, i64* %t222, align 8
  %t302 = load i64, i64* %t262, align 8

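  ; Isomorphic adds with constant operands; the scalar constants are gathered
  ; into the vector operands <4, 4, 6, 7>.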
  %t20 = add nsw i32 %t19, 4
  %t24 = add nsw i32 %t23, 4
  %t28 = add nsw i32 %t27, 6
  %t31 = add nsw i32 %t30, 7

  %t202 = add nsw i64 %t192, 4
  %t242 = add nsw i64 %t232, 4
  %t282 = add nsw i64 %t272, 6
  %t312 = add nsw i64 %t302, 7

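  ; Consecutive stores back into %v0, completing the vectorizable chains.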
  store i64 %t202, i64* %t212, align 8
  store i64 %t242, i64* %t252, align 8
  store i64 %t282, i64* %t292, align 8
  store i64 %t312, i64* %t322, align 8

  store i32 %t20, i32* %t21, align 4
  store i32 %t24, i32* %t25, align 4
  store i32 %t28, i32* %t29, align 4
  store i32 %t31, i32* %t32, align 4

  ret void
}