; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0

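; The 32-byte-aligned 256-bit loads and stores in @test_256_load below should
; each be emitted as a single full-width vmovaps (three loads, three stores).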
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
entry:
  %0 = bitcast double* %d to <4 x double>*
  %tmp1.i = load <4 x double>* %0, align 32
  %1 = bitcast float* %f to <8 x float>*
  %tmp1.i17 = load <8 x float>* %1, align 32
  %tmp1.i16 = load <4 x i64>* %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;;
;; The two tests below check that a load + scalar_to_vector + ins_subvec + zext
;; sequence is folded into a single vmovss, vmovsd, or vinsertps from memory.

; CHECK: mov00
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
  %val = load float* %ptr
; CHECK: vinsertps
; CHECK: vinsertf128
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
; CHECK: ret
}

; CHECK: mov01
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
  %val = load double* %ptr
; CHECK: vmovlpd
; CHECK: vinsertf128
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
; CHECK: ret
}

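; The four store tests below cover 256-bit integer vector stores: with 32-byte
; alignment a single vmovaps of a %ymm register is expected, while under-aligned
; stores are split with vextractf128 into two vmovups of the %xmm halves.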
; CHECK: vmovaps  %ymm
define void @storev16i16(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 32
  unreachable
}

; CHECK: storev16i16_01
; CHECK: vextractf128
; CHECK: vmovups  %xmm
define void @storev16i16_01(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 4
  unreachable
}

; CHECK: storev32i8
; CHECK: vmovaps  %ymm
define void @storev32i8(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 32
  unreachable
}

; CHECK: storev32i8_01
; CHECK: vextractf128
; CHECK: vmovups  %xmm
define void @storev32i8_01(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 4
  unreachable
}

; It is faster to issue two 128-bit stores when the data is already in XMM
; registers, for example after an integer operation.
; CHECK: _double_save
; CHECK-NOT: vinsertf128 $1
; CHECK-NOT: vinsertf128 $0
; CHECK: vmovaps %xmm
; CHECK: vmovaps %xmm
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
entry:
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind

; CHECK_O0: _f_f
; CHECK_O0: vmovss LCPI
; CHECK_O0: vxorps  %xmm
; CHECK_O0: vmovss %xmm
define void @f_f() nounwind {
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:                                     ; preds = %allocas
  unreachable

cif_mask_mixed:                                   ; preds = %allocas
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:                               ; preds = %cif_mask_mixed
  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:                         ; preds = %cif_mask_mixed
  unreachable
}

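; add8i32 uses align 1 loads and stores, so the 256-bit access is expected to
; be split into 128-bit vmovups halves, with no vinsertf128/vextractf128 to
; reassemble the %ymm value in between.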
; CHECK: add8i32
; CHECK: vmovups
; CHECK: vmovups
; CHECK-NOT: vinsertf128
; CHECK-NOT: vextractf128
; CHECK: vmovups
; CHECK: vmovups
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
  %b = load <8 x i32>* %bp, align 1
  %x = add <8 x i32> zeroinitializer, %b
  store <8 x i32> %x, <8 x i32>* %ret, align 1
  ret void
}

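; With 64-byte alignment the 256-bit access stays a single aligned vmovaps of a
; %ymm register.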
; CHECK: add4i64a64
; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
  %b = load <4 x i64>* %bp, align 64
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 64
  ret void
}

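; With only 16-byte alignment the 256-bit access is expected to be split into
; two aligned 128-bit vmovaps accesses.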
; CHECK: add4i64a16
; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
  %b = load <4 x i64>* %bp, align 16
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 16
  ret void
}