; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX

; float -> double extension: the load and cvtss2sd stay separate at default opt
; (no optsize), so movss zeroes the register before the convert.
define double @t1(float* nocapture %x) nounwind readonly ssp {
entry:
; SSE-LABEL: t1:
; SSE: movss ([[A0:%rdi|%rcx]]), %xmm0
; SSE: cvtss2sd %xmm0, %xmm0

  %0 = load float* %x, align 4
  %1 = fpext float %0 to double
  ret double %1
}

; double -> float truncation under optsize: the load is folded into cvtsd2ss.
define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
entry:
; SSE-LABEL: t2:
; SSE: cvtsd2ss ([[A0]]), %xmm0
  %0 = load double* %x, align 8
  %1 = fptrunc double %0 to float
  ret float %1
}

; sqrtf without optsize: the load is NOT folded into sqrtss, so the explicit
; movss breaks the false dependence on %xmm0's previous value.
define float @squirtf(float* %x) nounwind {
entry:
; SSE-LABEL: squirtf:
; SSE: movss ([[A0]]), %xmm0
; SSE: sqrtss %xmm0, %xmm0
  %z = load float* %x
  %t = call float @llvm.sqrt.f32(float %z)
  ret float %t
}

; sqrt (double) without optsize: same pattern as squirtf — separate movsd load
; followed by a register-register sqrtsd.
define double @squirt(double* %x) nounwind {
entry:
; SSE-LABEL: squirt:
; SSE: movsd ([[A0]]), %xmm0
; SSE: sqrtsd %xmm0, %xmm0
  %z = load double* %x
  %t = call double @llvm.sqrt.f64(double %z)
  ret double %t
}

; sqrtf under optsize: the load IS folded into sqrtss to save code size.
define float @squirtf_size(float* %x) nounwind optsize {
entry:
; SSE-LABEL: squirtf_size:
; SSE: sqrtss ([[A0]]), %xmm0
  %z = load float* %x
  %t = call float @llvm.sqrt.f32(float %z)
  ret float %t
}

; sqrt (double) under optsize: load folded into sqrtsd.
define double @squirt_size(double* %x) nounwind optsize {
entry:
; SSE-LABEL: squirt_size:
; SSE: sqrtsd ([[A0]]), %xmm0
  %z = load double* %x
  %t = call double @llvm.sqrt.f64(double %z)
  ret double %t
}

declare float @llvm.sqrt.f32(float)
declare double @llvm.sqrt.f64(double)

; SSE-LABEL: loopdep1
; SSE: for.body
;
; This loop contains two cvtsi2ss instructions that update the same xmm
; register.  Verify that the execution dependency fix pass breaks those
; dependencies by inserting xorps instructions.
;
; If the register allocator chooses different registers for the two cvtsi2ss
; instructions, they are still dependent on themselves.
; SSE: xorps [[XMM1:%xmm[0-9]+]]
; SSE: , [[XMM1]]
; SSE: cvtsi2ssl %{{.*}}, [[XMM1]]
; SSE: xorps [[XMM2:%xmm[0-9]+]]
; SSE: , [[XMM2]]
; SSE: cvtsi2ssl %{{.*}}, [[XMM2]]
;
; Two independent sitofp conversions per iteration (see CHECK comments above):
; each must be preceded by an xorps so neither cvtsi2ss carries a loop
; dependence on its destination register.
define float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
entry:
  %tobool3 = icmp eq i32 %m, 0
  br i1 %tobool3, label %for.end, label %for.body

for.body:                                         ; preds = %entry, %for.body
  %m.addr.07 = phi i32 [ %dec, %for.body ], [ %m, %entry ]
  %s1.06 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
  %s2.05 = phi float [ %add2, %for.body ], [ 0.000000e+00, %entry ]
  %n.04 = phi i32 [ %inc, %for.body ], [ 1, %entry ]
  %conv = sitofp i32 %n.04 to float
  %add = fadd float %s1.06, %conv
  %conv1 = sitofp i32 %m.addr.07 to float
  %add2 = fadd float %s2.05, %conv1
  %inc = add nsw i32 %n.04, 1
  %dec = add nsw i32 %m.addr.07, -1
  %tobool = icmp eq i32 %dec, 0
  br i1 %tobool, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %s1.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %s2.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add2, %for.body ]
  %sub = fsub float %s1.0.lcssa, %s2.0.lcssa
  ret float %sub
}

; rdar:15221834 False AVX register dependencies cause 5x slowdown on
; flops-6. Make sure the unused register read by vcvtsi2sdq is zeroed
; to avoid cyclic dependence on a write to the same register in a
; previous iteration.

; AVX-LABEL: loopdep2:
; AVX-LABEL: %loop
; AVX: vxorps %[[REG:xmm.]], %{{xmm.}}, %{{xmm.}}
; AVX: vcvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
; SSE-LABEL: loopdep2:
; SSE-LABEL: %loop
; SSE: xorps %[[REG:xmm.]], %[[REG]]
; SSE: cvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]]
; Loop-carried sitofp (i64 -> double); the CHECK comments above verify the
; convert's destination is zeroed each iteration to break the false dependence.
define i64 @loopdep2(i64* nocapture %x, double* nocapture %y) nounwind {
entry:
  %vx = load i64* %x
  br label %loop
loop:
  %i = phi i64 [ 1, %entry ], [ %inc, %loop ]
  %s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
  %fi = sitofp i64 %i to double
  %vy = load double* %y
  %fipy = fadd double %fi, %vy
  %iipy = fptosi double %fipy to i64
  %s2 = add i64 %s1, %iipy
  %inc = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %inc, 156250000
  br i1 %exitcond, label %ret, label %loop
ret:
  ret i64 %s2
}

; This loop contains a cvtsi2sd instruction that has a loop-carried
; false dependency on an xmm that is modified by other scalar instructions
; that follow it in the loop. Additionally, the source of convert is a
; memory operand. Verify the execution dependency fix pass breaks this
; dependency by inserting a xor before the convert.
@x = common global [1024 x double] zeroinitializer, align 16
@y = common global [1024 x double] zeroinitializer, align 16
@z = common global [1024 x double] zeroinitializer, align 16
@w = common global [1024 x double] zeroinitializer, align 16
@v = common global [1024 x i32] zeroinitializer, align 16

; sitofp from a memory operand inside a hot inner loop; the trailing CHECK
; lines verify an xor(ps) is inserted immediately before the convert so the
; mulsd chain that reuses the same xmm does not create a loop-carried
; false dependence.
define void @loopdep3() {
entry:
  br label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %for.inc14, %entry
  %i.025 = phi i32 [ 0, %entry ], [ %inc15, %for.inc14 ]
  br label %for.body3

for.body3:
  %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
  %arrayidx = getelementptr inbounds [1024 x i32]* @v, i64 0, i64 %indvars.iv
  %0 = load i32* %arrayidx, align 4
  %conv = sitofp i32 %0 to double
  %arrayidx5 = getelementptr inbounds [1024 x double]* @x, i64 0, i64 %indvars.iv
  %1 = load double* %arrayidx5, align 8
  %mul = fmul double %conv, %1
  %arrayidx7 = getelementptr inbounds [1024 x double]* @y, i64 0, i64 %indvars.iv
  %2 = load double* %arrayidx7, align 8
  %mul8 = fmul double %mul, %2
  %arrayidx10 = getelementptr inbounds [1024 x double]* @z, i64 0, i64 %indvars.iv
  %3 = load double* %arrayidx10, align 8
  %mul11 = fmul double %mul8, %3
  %arrayidx13 = getelementptr inbounds [1024 x double]* @w, i64 0, i64 %indvars.iv
  store double %mul11, double* %arrayidx13, align 8
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1024
  br i1 %exitcond, label %for.inc14, label %for.body3

for.inc14:                                        ; preds = %for.body3
  %inc15 = add nsw i32 %i.025, 1
  %exitcond26 = icmp eq i32 %inc15, 100000
  br i1 %exitcond26, label %for.end16, label %for.cond1.preheader

for.end16:                                        ; preds = %for.inc14
  ret void

;SSE-LABEL:@loopdep3
;SSE: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
;SSE-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
;SSE-NEXT: movsd [[XMM0]],
;AVX-LABEL:@loopdep3
;AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
;AVX-NEXT: vcvtsi2sdl {{.*}}, [[XMM0]], [[XMM0]]
;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
;AVX-NEXT: vmovsd [[XMM0]],
}
