/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>
#include <arm/vfpreg.h>

RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")

/*
 * This file provides softfloat-compatible routines which use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while remaining compatible with soft-float code: arguments
 * and results are passed in general-purpose registers, as the soft-float
 * ABI expects, and only the arithmetic itself is done in VFP registers.
 *
 * This file implements the single-precision floating point routines.
 */
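
/*
 * For reference, these entry points use the names and soft-float calling
 * convention of the GCC runtime library, so they can stand in for the
 * libgcc versions.  In C terms they behave like the following (a sketch of
 * the standard libgcc prototypes, not declarations exported by this file):
 *
 *	float        __addsf3(float a, float b);	// a + b
 *	float        __subsf3(float a, float b);	// a - b
 *	float        __mulsf3(float a, float b);	// a * b
 *	float        __divsf3(float a, float b);	// a / b
 *	float        __negsf2(float a);			// -a
 *	float        __truncdfsf2(double a);		// (float)a
 *	int          __fixsfsi(float a);		// (int)a
 *	unsigned int __fixunssfsi(float a);		// (unsigned int)a
 *	float        __floatsisf(int a);		// (float)a
 *	float        __floatunsisf(unsigned int a);	// (float)a
 *
 * The comparison predicates (__eqsf2 and friends) follow below, after the
 * arithmetic and conversion routines.
 */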

/* float __addsf3(float a, float b): return a + b */
ENTRY(__addsf3)
	vmov		s0, s1, r0, r1	/* s0 = a, s1 = b */
	vadd.f32	s0, s0, s1
	vmov		r0, s0		/* return the result in r0 */
	RET
END(__addsf3)

/* float __subsf3(float a, float b): return a - b */
ENTRY(__subsf3)
	vmov		s0, s1, r0, r1
	vsub.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__subsf3)

/* float __mulsf3(float a, float b): return a * b */
ENTRY(__mulsf3)
	vmov		s0, s1, r0, r1
	vmul.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__mulsf3)

/* float __divsf3(float a, float b): return a / b */
ENTRY(__divsf3)
	vmov		s0, s1, r0, r1
	vdiv.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__divsf3)

/* float __negsf2(float a): return -a */
ENTRY(__negsf2)
	vmov		s0, r0
	vneg.f32	s0, s0
	vmov		r0, s0
	RET
END(__negsf2)

/* float __truncdfsf2(double a): narrow a double to a float */
ENTRY(__truncdfsf2)
#ifdef __ARMEL__
	vmov		d0, r0, r1	/* little-endian: low word of a in r0 */
#else
	vmov		d0, r1, r0	/* big-endian: low word of a in r1 */
#endif
	vcvt.f32.f64	s0, d0
	vmov		r0, s0
	RET
END(__truncdfsf2)

/* int __fixsfsi(float a): convert to a signed int, rounding toward zero */
ENTRY(__fixsfsi)
	vmov		s0, r0
	vcvt.s32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixsfsi)

/*
 * unsigned int __fixunssfsi(float a): convert to an unsigned int,
 * rounding toward zero
 */
ENTRY(__fixunssfsi)
	vmov		s0, r0
	vcvt.u32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixunssfsi)

/* float __floatsisf(int a): convert a signed int to a float */
ENTRY(__floatsisf)
	vmov		s0, r0
	vcvt.f32.s32	s0, s0
	vmov		r0, s0
	RET
END(__floatsisf)

/* float __floatunsisf(unsigned int a): convert an unsigned int to a float */
ENTRY(__floatunsisf)
	vmov		s0, r0
	vcvt.f32.u32	s0, s0
	vmov		r0, s0
	RET
END(__floatunsisf)

/*
 * Flag state after "vcmp.f32" + "vmrs APSR_nzcv, fpscr":
 *	N set if the operands compared "less than"
 *	Z set if the operands compared "equal"
 *	C set if the operands compared "equal", "greater than", or unordered
 *	V set if the comparison was unordered (at least one NaN operand)
 */
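
/*
 * Each predicate below returns an int which the caller compares against
 * zero.  For example, following the documented libgcc convention (the
 * exact call a compiler emits depends on the target ABI), a test such as
 * "a < b" is evaluated as:
 *
 *	if (__ltsf2(a, b) < 0)	// true when a < b and neither is a NaN
 *		...
 */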

/*
 * int __eqsf2(float a, float b) / int __nesf2(float a, float b):
 * both return zero iff a == b and neither operand is a NaN, so a single
 * body serves the two entry points.
 */
STRONG_ALIAS(__eqsf2, __nesf2)
ENTRY(__nesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr	/* FP flags -> CPSR flags */
	moveq		r0, #0		/* !(a == b) */
	movne		r0, #1		/* !(a == b) */
	RET
END(__nesf2)

/*
 * int __gesf2(float a, float b) / int __ltsf2(float a, float b):
 * return a negative value iff a < b; a single body serves both.
 */
STRONG_ALIAS(__gesf2, __ltsf2)
ENTRY(__ltsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* -(a < b) */
	movpl		r0, #0		/* -(a < b) */
	RET
END(__ltsf2)

/*
 * int __gtsf2(float a, float b) / int __lesf2(float a, float b):
 * return a positive value iff a > b; a single body serves both.
 */
STRONG_ALIAS(__gtsf2, __lesf2)
ENTRY(__lesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a > b) */
	RET
END(__lesf2)

/* int __unordsf2(float a, float b): nonzero iff either operand is a NaN */
ENTRY(__unordsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* isnan(a) || isnan(b) */
	RET
END(__unordsf2)