; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah -regalloc=basic | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin -regalloc=basic | FileCheck %s --check-prefix=X64

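; To regenerate the assertions after an intentional codegen change, rerun
; utils/update_llc_test_checks.py on this file with a built llc reachable
; (e.g. on $PATH); the exact invocation may vary with your checkout.
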
; This testcase should need to spill the -1 value on both x86-32 and x86-64,
; so it shouldn't use pcmpeqd to materialize an all-ones vector; it
; should use a constant-pool load instead.
;
; RAGreedy defeats the test by splitting live ranges, hence -regalloc=basic
; in the RUN lines above.

; There should be no pcmpeqd instructions; everybody should use the constant pool.

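; As an illustrative sketch only (FileCheck matches the full bodies below, not
; these lines): the in-register idiom this test forbids is
;   pcmpeqd %xmm0, %xmm0          ## xmm0 = <-1,-1,-1,-1>, but must be redone
;                                 ## or spilled around the calls below
; while the expected form is a constant-pool reload such as
;   movaps LCPI0_0, %xmm0         ## all-ones from the constant pool
;                                 ## (LCPI0_0 is a hypothetical label here)
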
	%struct.__ImageExecInfo = type <{ <4 x i32>, <4 x float>, <2 x i64>, i8*, i8*, i8*, i32, i32, i32, i32, i32 }>
	%struct._cl_image_format_t = type <{ i32, i32, i32 }>
	%struct._image2d_t = type <{ i8*, %struct._cl_image_format_t, i32, i32, i32, i32, i32, i32 }>

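; The all-ones <4 x i32> constants in %forbody are used on both sides of the
; two indirect calls, which clobber every xmm register, so the value is live
; across the calls; that is what forces the spill discussed above.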
define void @program_1(%struct._image2d_t* %dest, %struct._image2d_t* %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
; X32-LABEL: program_1:
; X32:       ## %bb.0: ## %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $88, %esp
; X32-NEXT:    cmpl $0, 0
; X32-NEXT:    jle LBB0_2
; X32-NEXT:  ## %bb.1: ## %forcond
; X32-NEXT:    cmpl $0, 0
; X32-NEXT:    jg LBB0_3
; X32-NEXT:  LBB0_2: ## %ifthen
; X32-NEXT:    addl $88, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
; X32-NEXT:  LBB0_3: ## %forbody
; X32-NEXT:    movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
; X32-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT:    cvttps2dq %xmm1, %xmm0
; X32-NEXT:    cvtdq2ps %xmm0, %xmm0
; X32-NEXT:    subps %xmm0, %xmm1
; X32-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    mulps %xmm1, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    xorps %xmm0, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    mulps %xmm0, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    xorps %xmm0, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    cmpunordps %xmm0, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    xorps %xmm0, %xmm0
; X32-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT:    movl $0, (%esp)
; X32-NEXT:    xorl %esi, %esi
; X32-NEXT:    xorps %xmm3, %xmm3
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
; X32-NEXT:    calll *%esi
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    pxor %xmm1, %xmm1
; X32-NEXT:    psubd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    por %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X32-NEXT:    pxor %xmm0, %xmm0
; X32-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT:    movl $0, (%esp)
; X32-NEXT:    xorps %xmm3, %xmm3
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
; X32-NEXT:    calll *%esi
; X32-NEXT:    ud2
;
; X64-LABEL: program_1:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $64, %rsp
; X64-NEXT:    cmpl $0, 0
; X64-NEXT:    jle LBB0_2
; X64-NEXT:  ## %bb.1: ## %forcond
; X64-NEXT:    cmpl $0, 0
; X64-NEXT:    jg LBB0_3
; X64-NEXT:  LBB0_2: ## %ifthen
; X64-NEXT:    addq $64, %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
; X64-NEXT:  LBB0_3: ## %forbody
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
; X64-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    cvttps2dq %xmm1, %xmm0
; X64-NEXT:    cvtdq2ps %xmm0, %xmm0
; X64-NEXT:    subps %xmm0, %xmm1
; X64-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps %xmm1, %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movdqa (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movdqa %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    cmpunordps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    xorl %ebx, %ebx
; X64-NEXT:    xorps %xmm3, %xmm3
; X64-NEXT:    xorps %xmm4, %xmm4
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 ## 16-byte Reload
; X64-NEXT:    xorl %edi, %edi
; X64-NEXT:    callq *%rbx
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    psubd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    orps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    xorps %xmm3, %xmm3
; X64-NEXT:    xorps %xmm4, %xmm4
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; X64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
; X64-NEXT:    xorl %edi, %edi
; X64-NEXT:    callq *%rbx
; X64-NEXT:    ud2
entry:
	%tmp3.i = load i32, i32* null		; <i32> [#uses=1]
	%cmp = icmp slt i32 0, %tmp3.i		; <i1> [#uses=1]
	br i1 %cmp, label %forcond, label %ifthen

ifthen:		; preds = %entry
	ret void

forcond:		; preds = %entry
	%tmp3.i536 = load i32, i32* null		; <i32> [#uses=1]
	%cmp12 = icmp slt i32 0, %tmp3.i536		; <i1> [#uses=1]
	br i1 %cmp12, label %forbody, label %afterfor

forbody:		; preds = %forcond
	%bitcast204.i104 = bitcast <4 x i32> zeroinitializer to <4 x float>		; <<4 x float>> [#uses=1]
	%tmp78 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> < float 1.280000e+02, float 1.280000e+02, float 1.280000e+02, float 1.280000e+02 >, <4 x float> zeroinitializer) nounwind		; <<4 x float>> [#uses=2]
	%tmp79 = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp78) nounwind		; <<4 x i32>> [#uses=1]
	%tmp80 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp79) nounwind		; <<4 x float>> [#uses=1]
	%sub140.i = fsub <4 x float> %tmp78, %tmp80		; <<4 x float>> [#uses=2]
	%mul166.i = fmul <4 x float> zeroinitializer, %sub140.i		; <<4 x float>> [#uses=1]
	%add167.i = fadd <4 x float> %mul166.i, < float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 >		; <<4 x float>> [#uses=1]
	%mul171.i = fmul <4 x float> %add167.i, %sub140.i		; <<4 x float>> [#uses=1]
	%add172.i = fadd <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 >		; <<4 x float>> [#uses=1]
	%bitcast176.i = bitcast <4 x float> %add172.i to <4 x i32>		; <<4 x i32>> [#uses=1]
	%andnps178.i = add <4 x i32> %bitcast176.i, <i32 1, i32 1, i32 1, i32 1>		; <<4 x i32>> [#uses=1]
	%bitcast179.i = bitcast <4 x i32> %andnps178.i to <4 x float>		; <<4 x float>> [#uses=1]
	%mul186.i = fmul <4 x float> %bitcast179.i, zeroinitializer		; <<4 x float>> [#uses=1]
	%bitcast190.i = bitcast <4 x float> %mul186.i to <4 x i32>		; <<4 x i32>> [#uses=1]
	%andnps192.i = add <4 x i32> %bitcast190.i, <i32 1, i32 1, i32 1, i32 1>		; <<4 x i32>> [#uses=1]
	%xorps.i = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%orps203.i = add <4 x i32> %andnps192.i, %xorps.i		; <<4 x i32>> [#uses=1]
	%bitcast204.i = bitcast <4 x i32> %orps203.i to <4 x float>		; <<4 x float>> [#uses=1]
	%mul310 = fmul <4 x float> %bitcast204.i104, zeroinitializer		; <<4 x float>> [#uses=2]
	%mul313 = fmul <4 x float> %bitcast204.i, zeroinitializer		; <<4 x float>> [#uses=1]
	%cmpunord.i11 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> zeroinitializer, i8 3) nounwind		; <<4 x float>> [#uses=1]
	%tmp83 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul310, <4 x float> zeroinitializer) nounwind		; <<4 x float>> [#uses=1]
	%bitcast.i3 = bitcast <4 x float> %mul310 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%andps.i5 = and <4 x i32> %bitcast.i3, zeroinitializer		; <<4 x i32>> [#uses=1]

	call void null(<4 x float> %mul313, <4 x float> %cmpunord.i11, <4 x float> %tmp83, <4 x float> zeroinitializer, %struct.__ImageExecInfo* null, <4 x i32> zeroinitializer) nounwind

	%tmp84 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul313, <4 x float> zeroinitializer) nounwind		; <<4 x float>> [#uses=1]

	%bitcast6.i13 = bitcast <4 x float> %cmpunord.i11 to <4 x i32>		; <<4 x i32>> [#uses=2]
	%andps.i14 = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %bitcast6.i13		; <<4 x i32>> [#uses=1]
	%not.i16 = xor <4 x i32> %bitcast6.i13, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%andnps.i17 = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %not.i16		; <<4 x i32>> [#uses=1]
	%orps.i18 = or <4 x i32> %andnps.i17, %andps.i14		; <<4 x i32>> [#uses=1]
	%bitcast17.i19 = bitcast <4 x i32> %orps.i18 to <4 x float>		; <<4 x float>> [#uses=1]

	%bitcast11.i6 = bitcast <4 x float> %tmp83 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%not.i7 = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%andnps.i8 = and <4 x i32> %bitcast11.i6, %not.i7		; <<4 x i32>> [#uses=1]
	%orps.i9 = or <4 x i32> %andnps.i8, %andps.i5		; <<4 x i32>> [#uses=1]
	%bitcast17.i10 = bitcast <4 x i32> %orps.i9 to <4 x float>		; <<4 x float>> [#uses=1]

	%bitcast6.i = bitcast <4 x float> zeroinitializer to <4 x i32>		; <<4 x i32>> [#uses=2]
	%andps.i = and <4 x i32> zeroinitializer, %bitcast6.i		; <<4 x i32>> [#uses=1]
	%bitcast11.i = bitcast <4 x float> %tmp84 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%not.i = xor <4 x i32> %bitcast6.i, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%andnps.i = and <4 x i32> %bitcast11.i, %not.i		; <<4 x i32>> [#uses=1]
	%orps.i = or <4 x i32> %andnps.i, %andps.i		; <<4 x i32>> [#uses=1]
	%bitcast17.i = bitcast <4 x i32> %orps.i to <4 x float>		; <<4 x float>> [#uses=1]
	call void null(<4 x float> %bitcast17.i19, <4 x float> %bitcast17.i10, <4 x float> %bitcast17.i, <4 x float> zeroinitializer, %struct.__ImageExecInfo* null, <4 x i32> zeroinitializer) nounwind
	unreachable

afterfor:		; preds = %forcond
	ret void
}

declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone

declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone

declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone

declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone