/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

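/*
 * AEABI single-precision floating-point helpers.  The ARM Run-time ABI
 * (RTABI) defines these entry points so that soft-float code can be
 * backed by VFP hardware: arguments arrive in core registers, and the
 * LOAD and UNLOAD macros from aeabi_vfp.h move values between core and
 * VFP registers.
 */
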
/* void __aeabi_cfcmpeq(float, float) */
AEABI_ENTRY(cfcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfcmpeq)

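/*
 * cfcmple and cfrcmple use vcmpe, which signals Invalid Operation for
 * any NaN operand; the plain vcmp above only signals for signalling
 * NaNs, since the RTABI specifies a non-excepting equality comparison
 * for cfcmpeq.
 */
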
/* void __aeabi_cfcmple(float, float) */
AEABI_ENTRY(cfcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmpe.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfcmple)

/* void __aeabi_cfrcmple(float, float) */
AEABI_ENTRY(cfrcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmpe.f32	s1, s0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfrcmple)

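/*
 * The fcmp* helpers return an int.  vmrs copies the FPSCR condition
 * flags into the APSR, and the ite block then selects 0 or 1 from the
 * IEEE-754 relation; an unordered result (NaN operand) yields 0 for
 * every predicate except fcmpun.
 */
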
/* int __aeabi_fcmpeq(float, float) */
AEABI_ENTRY(fcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      ne		/* ne: not equal, or unordered */
	movne    r0, #0
	moveq    r0, #1
	RET
AEABI_END(fcmpeq)

/* int __aeabi_fcmplt(float, float) */
AEABI_ENTRY(fcmplt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      cs		/* cs: s0 >= s1, or unordered */
	movcs    r0, #0
	movcc    r0, #1
	RET
AEABI_END(fcmplt)

/* int __aeabi_fcmple(float, float) */
AEABI_ENTRY(fcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      hi		/* hi: s0 > s1, or unordered */
	movhi    r0, #0
	movls    r0, #1
	RET
AEABI_END(fcmple)

/* int __aeabi_fcmpge(float, float) */
AEABI_ENTRY(fcmpge)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      lt		/* lt: s0 < s1, or unordered */
	movlt    r0, #0
	movge    r0, #1
	RET
AEABI_END(fcmpge)

/* int __aeabi_fcmpgt(float, float) */
AEABI_ENTRY(fcmpgt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      le		/* le: s0 <= s1, or unordered */
	movle    r0, #0
	movgt    r0, #1
	RET
AEABI_END(fcmpgt)

/* int __aeabi_fcmpun(float, float) */
AEABI_ENTRY(fcmpun)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      vc		/* vs: unordered, i.e. either operand is NaN */
	movvc    r0, #0
	movvs    r0, #1
	RET
AEABI_END(fcmpun)

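/*
 * Conversions: f2iz truncates a float to a signed int, f2d widens a
 * float to a double, and i2f converts a signed int to a float.
 */
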
/* int __aeabi_f2iz(float) */
AEABI_ENTRY(f2iz)
	LOAD_SREG(s0, r0)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that uses the FPSCR to determine the
	 * rounding.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f32 s0, s0
#else
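	/*
	 * ftosizs is the pre-UAL mnemonic; the "z" encodes round-toward-
	 * zero independent of the FPSCR rounding mode, which is the
	 * truncation __aeabi_f2iz requires.
	 */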
	ftosizs      s0, s0
#endif
	vmov         r0, s0
	RET
AEABI_END(f2iz)

/* double __aeabi_f2d(float) */
AEABI_ENTRY(f2d)
	LOAD_SREG(s0, r0)
	vcvt.f64.f32 d0, s0
	UNLOAD_DREG(r0, r1, d0)	/* double result returned in r0/r1 */
	RET
AEABI_END(f2d)

/* float __aeabi_i2f(int) */
AEABI_ENTRY(i2f)
	vmov         s0, r0
	vcvt.f32.s32 s0, s0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(i2f)

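/*
 * The remaining helpers each wrap a single VFP arithmetic instruction
 * and move the result back to r0 for soft-float callers.
 */
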
/* float __aeabi_fadd(float, float) */
AEABI_ENTRY(fadd)
	LOAD_SREGS(s0, s1, r0, r1)
	vadd.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fadd)

/* float __aeabi_fdiv(float, float) */
AEABI_ENTRY(fdiv)
	LOAD_SREGS(s0, s1, r0, r1)
	vdiv.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fdiv)

/* float __aeabi_fmul(float, float) */
AEABI_ENTRY(fmul)
	LOAD_SREGS(s0, s1, r0, r1)
	vmul.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fmul)

/* float __aeabi_fsub(float, float) */
AEABI_ENTRY(fsub)
	LOAD_SREGS(s0, s1, r0, r1)
	vsub.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fsub)

	.section .note.GNU-stack,"",%progbits