1/* mculib libgcc routines of Andes NDS32 cpu for GNU compiler
2   Copyright (C) 2012-2021 Free Software Foundation, Inc.
3   Contributed by Andes Technology Corporation.
4
5   This file is part of GCC.
6
7   GCC is free software; you can redistribute it and/or modify it
8   under the terms of the GNU General Public License as published
9   by the Free Software Foundation; either version 3, or (at your
10   option) any later version.
11
12   GCC is distributed in the hope that it will be useful, but WITHOUT
13   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15   License for more details.
16
17   Under Section 7 of GPL version 3, you are granted additional
18   permissions described in the GCC Runtime Library Exception, version
19   3.1, as published by the Free Software Foundation.
20
21   You should have received a copy of the GNU General Public License and
22   a copy of the GCC Runtime Library Exception along with this program;
23   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
24   <http://www.gnu.org/licenses/>.  */
25
26	.section	.mdebug.abi_nds32
27	.previous
28
29
30/* ------------------------------------------- */
31/* FPBIT floating point operations for libgcc  */
32/* ------------------------------------------- */
33
34#ifdef L_addsub_sf
35
36	.text
37	.align	2
38	.global	__subsf3
39	.type	__subsf3, @function
40__subsf3:
41	push    $lp
42	pushm   $r6, $r9
43
44	move    $r2, #0x80000000
45	xor     $r1, $r1, $r2
46
47	j       .Lsfpadd
48
49	.global	__addsf3
50	.type	__addsf3, @function
51__addsf3:
52	push    $lp
53	pushm   $r6, $r9
54.Lsfpadd:
55	srli    $r5, $r0, #23
56	andi    $r5, $r5, #0xff
57	srli    $r7, $r1, #23
58	andi    $r7, $r7, #0xff
59	move    $r3, #0x80000000
60	slli    $r4, $r0, #8
61	or      $r4, $r4, $r3
62	slli    $r6, $r1, #8
63	or      $r6, $r6, $r3
64
65	addi    $r9, $r5, #-1
66	slti    $r15, $r9, #0xfe
67	beqzs8  .LEspecA
68
69.LElab1:
70	addi    $r9, $r7, #-1
71	slti    $r15, $r9, #0xfe
72	beqzs8  .LEspecB
73
74.LElab2:
75	sub     $r8, $r5, $r7
76	sltsi   $r15, $r8, #0
77	bnezs8  .Li1
78	sltsi   $r15, $r8, #0x20
79	bnezs8  .Li2
80	move    $r6, #2
81	j       .Le1
82.Li2:
83	move    $r2, $r6
84	srl     $r6, $r6, $r8
85	sll     $r9, $r6, $r8
86	beq     $r9, $r2, .Le1
87	ori     $r6, $r6, #2
88	j       .Le1
89.Li1:
90	move    $r5, $r7
91	subri   $r8, $r8, #0
92	sltsi   $r15, $r8, #0x20
93	bnezs8  .Li4
94	move    $r4, #2
95	j       .Le1
96.Li4:
97	move    $r2, $r4
98	srl     $r4, $r4, $r8
99	sll     $r9, $r4, $r8
100	beq     $r9, $r2, .Le1
101	ori     $r4, $r4, #2
102
103.Le1:
104	and     $r8, $r0, $r3
105	xor     $r9, $r8, $r1
106	sltsi   $r15, $r9, #0
107	bnezs8  .LEsub1
108
109	#ADD($r4, $r6)
110	add     $r4, $r4, $r6
111	slt     $r15, $r4, $r6
112	beqzs8  .LEres
113	andi    $r9, $r4, #1
114	beqz    $r9, .Li7
115	ori     $r4, $r4, #2
116.Li7:
117	srli    $r4, $r4, #1
118	addi    $r5, $r5, #1
119	subri   $r15, $r5, #0xff
120	bnezs8  .LEres
121	move    $r4, #0
122	j       .LEres
123
124.LEsub1:
125	#SUB($r4, $r6)
126	move    $r15, $r4
127	sub     $r4, $r4, $r6
128	slt     $r15, $r15, $r4
129	beqzs8  .Li9
130	subri   $r4, $r4, #0
131	xor     $r8, $r8, $r3
132	j       .Le9
133.Li9:
134	beqz    $r4, .LEzer
135.Le9:
136#ifdef __NDS32_PERF_EXT__
137	clz	$r2, $r4
138#else
139	pushm	$r0, $r1
140	pushm	$r3, $r5
141	move	$r0, $r4
142	bal	__clzsi2
143	move	$r2, $r0
144	popm	$r3, $r5
145	popm	$r0, $r1
146#endif
147	sub     $r5, $r5, $r2
148	sll     $r4, $r4, $r2
149
150.LEres:
151	blez    $r5, .LEund
152
153.LElab12:
154	#ADD($r4, $0x80)
155	move    $r15, #0x80
156	add     $r4, $r4, $r15
157	slt     $r15, $r4, $r15
158
159	#ADDC($r5, $0x0)
160	add     $r5, $r5, $r15
161	srli    $r9, $r4, #8
162	andi    $r9, $r9, #1
163	sub     $r4, $r4, $r9
164	slli    $r4, $r4, #1
165	srli    $r4, $r4, #9
166	slli    $r9, $r5, #23
167	or      $r4, $r4, $r9
168	or      $r0, $r4, $r8
169
170.LE999:
171	popm    $r6, $r9
172	pop     $lp
173	ret5    $lp
174
175.LEund:
176	subri   $r2, $r5, #1
177	slti    $r15, $r2, #0x20
178	beqzs8  .LEzer
179	move    $r9, #0x80000000
180	or      $r4, $r4, $r9
181	subri   $r9, $r2, #0x20
182	sll     $r5, $r4, $r9
183	srl     $r4, $r4, $r2
184	beqz    $r5, .Li10
185	ori     $r4, $r4, #1
186.Li10:
187	move    $r5, #0
188	addi    $r9, $r4, #0x80
189	sltsi   $r15, $r9, #0
190	beqzs8  .LElab12
191	move    $r5, #1
192	j       .LElab12
193
194.LEspecA:
195	bnez    $r5, .Li12
196	add     $r4, $r4, $r4
197	beqz    $r4, .Li13
198#ifdef __NDS32_PERF_EXT__
199	clz	$r8, $r4
200#else
201	pushm	$r0, $r5
202	move	$r0, $r4
203	bal	__clzsi2
204	move	$r8, $r0
205	popm	$r0, $r5
206#endif
207	sub     $r5, $r5, $r8
208	sll     $r4, $r4, $r8
209	j       .LElab1
210.Li13:
211	subri   $r15, $r7, #0xff
212	beqzs8  .LEspecB
213	move    $r9, #0x80000000
214	bne     $r1, $r9, .LEretB
215.Li12:
216	add     $r9, $r4, $r4
217	bnez    $r9, .LEnan
218	subri   $r15, $r7, #0xff
219	bnezs8  .LEretA
220	xor     $r9, $r0, $r1
221	sltsi   $r15, $r9, #0
222	bnezs8  .LEnan
223	j       .LEretB
224
225.LEspecB:
226	bnez    $r7, .Li15
227	add     $r6, $r6, $r6
228	beqz    $r6, .LEretA
229#ifdef __NDS32_PERF_EXT__
230	clz	$r8, $r6
231#else
232	pushm	$r0, $r5
233	move	$r0, $r6
234	bal	__clzsi2
235	move	$r8, $r0
236	popm	$r0, $r5
237#endif
238	sub     $r7, $r7, $r8
239	sll     $r6, $r6, $r8
240	j       .LElab2
241.Li15:
242	add     $r9, $r6, $r6
243	bnez    $r9, .LEnan
244
245.LEretB:
246	move    $r0, $r1
247	j       .LE999
248
249.LEretA:
250	j       .LE999
251
252.LEzer:
253	move    $r0, #0
254	j       .LE999
255
256.LEnan:
257	move    $r0, #0xffc00000
258	j       .LE999
259	.size	__subsf3, .-__subsf3
260	.size	__addsf3, .-__addsf3
261#endif /* L_addsub_sf */
262
263
264
265#ifdef L_sf_to_si
266
267	.text
268	.align	2
269	.global	__fixsfsi
270	.type	__fixsfsi, @function
271__fixsfsi:
272	push    $lp
273
274	slli    $r1, $r0, #8
275	move    $r3, #0x80000000
276	or      $r1, $r1, $r3
277	srli    $r3, $r0, #23
278	andi    $r3, $r3, #0xff
279	subri   $r2, $r3, #0x9e
280	blez    $r2, .LJspec
281	sltsi   $r15, $r2, #0x20
282	bnezs8  .Li42
283	move    $r0, #0
284	j       .LJ999
285.Li42:
286	srl     $r1, $r1, $r2
287	sltsi   $r15, $r0, #0
288	beqzs8  .Li43
289	subri   $r1, $r1, #0
290.Li43:
291	move    $r0, $r1
292
293.LJ999:
294	pop     $lp
295	ret5    $lp
296
297.LJspec:
298	move    $r3, #0x7f800000
299	slt     $r15, $r3, $r0
300	beqzs8  .Li44
301	move    $r0, #0x80000000
302	j       .LJ999
303.Li44:
304	move    $r0, #0x7fffffff
305	j       .LJ999
306	.size	__fixsfsi, .-__fixsfsi
307#endif /* L_sf_to_si */
308
309
310
311#ifdef L_divsi3
312
313	.text
314	.align	2
315	.globl	__divsi3
316	.type	__divsi3, @function
317__divsi3:
318	! ---------------------------------------------------------------------
319	! neg = 0;
320	! if (a < 0)
321	! {   a = -a;
322	!     neg = !neg;
323	! }
324	! ---------------------------------------------------------------------
325	sltsi	$r5, $r0, 0			! $r5  <- neg = (a < 0) ? 1 : 0
326	subri	$r4, $r0, 0			! $r4  <- a = -a
327	cmovn	$r0, $r4, $r5			! $r0  <- a = neg ? -a : a
328.L2:
329	! ---------------------------------------------------------------------
330	! if (b < 0)
331	! ---------------------------------------------------------------------
332	bgez	$r1, .L3			! if b >= 0, skip
333	! ---------------------------------------------------------------------
334	! {   b=-b;
335	!     neg=!neg;
336	! }
337	! ---------------------------------------------------------------------
338	subri	$r1, $r1, 0			! $r1  <- b = -b
339	subri	$r5, $r5, 1			! $r5  <- neg = !neg
340.L3:
341	! ---------------------------------------------------------------------
342	!!res = udivmodsi4 (a, b, 1);
343	! res = 0;
344	! if (den != 0)
345	! ---------------------------------------------------------------------
346	movi	$r2, 0				! $r2  <- res = 0
347	beqz	$r1, .L1			! if den == 0, skip
348	! ---------------------------------------------------------------------
349	! bit = 1;
350	! ---------------------------------------------------------------------
351	movi	$r4, 1				! $r4  <- bit = 1
352#ifndef __OPTIMIZE_SIZE__
353.L6:
354#endif
355	! ---------------------------------------------------------------------
356	! while (den < num && bit && !(den & (1L << 31)))
357	! ---------------------------------------------------------------------
358	slt	$ta, $r1, $r0			! $ta  <- den < num ?
359	beqz	$ta, .L5			! if no, skip
360	! ---------------------------------------------------------------------
361	! {   den << = 1;
362	!     bit << = 1;
363	! }
364	! ---------------------------------------------------------------------
365#if defined (__OPTIMIZE_SIZE__) && !defined (__NDS32_ISA_V3M__)
366	clz	$r3, $r1			! $r3  <- leading zero count for den
367	clz	$ta, $r0			! $ta  <- leading zero count for num
368	sub	$r3, $r3, $ta			! $r3  <- number of bits to shift
369	sll	$r1, $r1, $r3			! $r1  <- den
370	sll	$r4, $r4, $r3			! $r2  <- bit
371#else
372	slli	$r1, $r1, 1			! $r1  <- den << = 1
373	slli	$r4, $r4, 1			! $r4  <- bit << = 1
374	b	.L6				! continue loop
375#endif
376.L5:
377	! ---------------------------------------------------------------------
378	! while (bit)
379	! {   if (num >= den)
380	! ---------------------------------------------------------------------
381	slt	$ta, $r0, $r1			! $ta  <- num < den ?
382	bnez	$ta, .L9			! if yes, skip
383	! ---------------------------------------------------------------------
384	!     {   num -= den;
385	!         res |= bit;
386	!     }
387	! ---------------------------------------------------------------------
388	sub	$r0, $r0, $r1			! $r0  <- num -= den
389	or	$r2, $r2, $r4			! $r2  <- res |= bit
390.L9:
391	! ---------------------------------------------------------------------
392	!     bit >> = 1;
393	!     den >> = 1;
394	! }
395	!!if (modwanted)
396	!!    return num;
397	!!return res;
398	! ---------------------------------------------------------------------
399	srli	$r4, $r4, 1			! $r4  <- bit >> = 1
400	srli	$r1, $r1, 1			! $r1  <- den >> = 1
401	bnez	$r4, .L5			! if bit != 0, continue loop
402.L1:
403	! ---------------------------------------------------------------------
404	! if (neg)
405	!     res = -res;
406	! return res;
407	! ---------------------------------------------------------------------
408	subri	$r0, $r2, 0			! $r0  <- -res
409	cmovz	$r0, $r2, $r5			! $r0  <- neg ? -res : res
410	! ---------------------------------------------------------------------
411	ret
412	.size	__divsi3, .-__divsi3
413#endif /* L_divsi3 */
414
415
416
417#ifdef L_divdi3
418
419	!--------------------------------------
420	#ifdef __big_endian__
421		#define  V1H  $r0
422		#define  V1L  $r1
423		#define  V2H  $r2
424		#define  V2L  $r3
425	#else
426		#define  V1H  $r1
427		#define  V1L  $r0
428		#define  V2H  $r3
429		#define  V2L  $r2
430	#endif
431	!--------------------------------------
432	.text
433	.align	2
434	.globl	__divdi3
435	.type	__divdi3, @function
436__divdi3:
437	! prologue
438#ifdef __NDS32_ISA_V3M__
439	push25	$r10, 0
440#else
441	smw.adm	$r6, [$sp], $r10, 2
442#endif
443	! end of prologue
444	move	$r8, V1L
445	move	$r9, V1H
446	move	$r6, V2L
447	move	$r7, V2H
448	movi	$r10, 0
449	bgez	V1H, .L80
450	bal	__negdi2
451	move	$r8, V1L
452	move	$r9, V1H
453	movi	$r10, -1
454.L80:
455	bgez	$r7, .L81
456	move	V1L, $r6
457	move	V1H, $r7
458	bal	__negdi2
459	move	$r6, V1L
460	move	$r7, V1H
461	nor	$r10, $r10, $r10
462.L81:
463	move	V2L, $r6
464	move	V2H, $r7
465	move	V1L, $r8
466	move	V1H, $r9
467	movi	$r4, 0
468	bal	__udivmoddi4
469	beqz	$r10, .L82
470	bal	__negdi2
471.L82:
472	! epilogue
473#ifdef __NDS32_ISA_V3M__
474	pop25	$r10, 0
475#else
476	lmw.bim	$r6, [$sp], $r10, 2
477	ret
478#endif
479	.size	__divdi3, .-__divdi3
480#endif /* L_divdi3 */
481
482
483
484#ifdef L_modsi3
485
486	.text
487	.align	2
488	.globl	__modsi3
489	.type	__modsi3, @function
490__modsi3:
491	! ---------------------------------------------------------------------
492	! neg=0;
493	! if (a<0)
494	! {   a=-a;
495	!     neg=1;
496	! }
497	! ---------------------------------------------------------------------
498	sltsi	$r5, $r0, 0			! $r5  <- neg < 0 ? 1 : 0
499	subri	$r4, $r0, 0			! $r4  <- -a
500	cmovn	$r0, $r4, $r5			! $r0  <- |a|
501	! ---------------------------------------------------------------------
502	! if (b < 0)
503#ifndef __NDS32_PERF_EXT__
504	! ---------------------------------------------------------------------
505	bgez	$r1, .L3			! if b >= 0, skip
506	! ---------------------------------------------------------------------
507	!     b = -b;
508	! ---------------------------------------------------------------------
509	subri	$r1, $r1, 0			! $r1  <- |b|
510.L3:
511	! ---------------------------------------------------------------------
512	!!res = udivmodsi4 (a, b, 1);
513	! if (den != 0)
514	! ---------------------------------------------------------------------
515#else /* __NDS32_PERF_EXT__ */
516	!     b = -b;
517	!!res = udivmodsi4 (a, b, 1);
518	! if (den != 0)
519	! ---------------------------------------------------------------------
520	abs	$r1, $r1			! $r1  <- |b|
521#endif /* __NDS32_PERF_EXT__ */
522	beqz	$r1, .L1			! if den == 0, skip
523	! ---------------------------------------------------------------------
524	! {   bit = 1;
525	!     res = 0;
526	! ---------------------------------------------------------------------
527	movi	$r4, 1				! $r4  <- bit = 1
528#ifndef __OPTIMIZE_SIZE__
529.L6:
530#endif
531	! ---------------------------------------------------------------------
532	!     while (den < num&&bit && !(den & (1L << 31)))
533	! ---------------------------------------------------------------------
534	slt	$ta, $r1, $r0			! $ta  <- den < num ?
535	beqz	$ta, .L5			! if no, skip
536	! ---------------------------------------------------------------------
537	!     {   den << = 1;
538	!         bit << = 1;
539	!     }
540	! ---------------------------------------------------------------------
541#if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
542	clz	$r3, $r1			! $r3  <- leading zero count for den
543	clz	$ta, $r0			! $ta  <- leading zero count for num
544	sub	$r3, $r3, $ta			! $r3  <- number of bits to shift
545	sll	$r1, $r1, $r3			! $r1  <- den
546	sll	$r4, $r4, $r3			! $r2  <- bit
547#else
548	slli	$r1, $r1, 1			! $r1  <- den << = 1
549	slli	$r4, $r4, 1			! $r4  <- bit << = 1
550	b	.L6				! continue loop
551#endif
552.L5:
553	! ---------------------------------------------------------------------
554	!     while (bit)
555	!     {   if (num >= den)
556	!         {   num -= den;
557	!             res |= bit;
558	!         }
559	!         bit >> = 1;
560	!         den >> = 1;
561	!     }
562	! }
563	!!if (modwanted)
564	!!    return num;
565	!!return res;
566	! ---------------------------------------------------------------------
567	sub	$r2, $r0, $r1			! $r2  <- num - den
568	slt	$ta, $r0, $r1			! $ta  <- num < den ?
569	srli	$r4, $r4, 1			! $r4  <- bit >> = 1
570	cmovz	$r0, $r2, $ta			! $r0  <- num = (num < den) ? num : num - den
571	srli	$r1, $r1, 1			! $r1  <- den >> = 1
572	bnez	$r4, .L5			! if bit != 0, continue loop
573.L1:
574	! ---------------------------------------------------------------------
575	! if (neg)
576	!     res = -res;
577	! return res;
578	! ---------------------------------------------------------------------
579	subri	$r3, $r0, 0			! $r3  <- -res
580	cmovn	$r0, $r3, $r5			! $r0  <- neg ? -res : res
581	! ---------------------------------------------------------------------
582	ret
583	.size	__modsi3, .-__modsi3
584#endif /* L_modsi3 */
585
586
587
588#ifdef L_moddi3
589
590	!--------------------------------------
591	#ifdef __big_endian__
592		#define  V1H  $r0
593		#define  V1L  $r1
594		#define  V2H  $r2
595		#define  V2L  $r3
596	#else
597		#define  V1H  $r1
598		#define  V1L  $r0
599		#define  V2H  $r3
600		#define  V2L  $r2
601	#endif
602	!--------------------------------------
603	.text
604	.align	2
605	.globl	__moddi3
606	.type	__moddi3, @function
607__moddi3:
608	! =====================================================================
609	! stack allocation:
610	! sp+32 +-----------------------+
611	!       | $lp                   |
612	! sp+28 +-----------------------+
613	!       | $r6 - $r10            |
614	! sp+8  +-----------------------+
615	!       |                       |
616	! sp+4  +-----------------------+
617	!       |                       |
618	! sp    +-----------------------+
619	! =====================================================================
620	! prologue
621#ifdef __NDS32_ISA_V3M__
622	push25	$r10, 8
623#else
624	smw.adm	$r6, [$sp], $r10, 2
625	addi	$sp, $sp, -8
626#endif
627	! end of prologue
628	!------------------------------------------
629	! 	__moddi3 (DWtype u, DWtype v)
630	!		{
631	!			word_type c = 0;
632	!			DWunion uu = {.ll = u};
633	!			DWunion vv = {.ll = v};
634	!			DWtype w;
635	!		if (uu.s.high < 0)
636	!  		  c = ~c,
637	!		  uu.ll = -uu.ll;
638	!---------------------------------------------
639	move	$r8, V1L
640	move	$r9, V1H
641	move	$r6, V2L
642	move	$r7, V2H
643	movi	$r10, 0        ! r10 = c = 0
644	bgez	V1H, .L80      ! if u > 0 , go L80
645	bal	__negdi2
646	move	$r8, V1L
647	move	$r9, V1H
648	movi	$r10, -1       ! r10 = c = ~c
649	!------------------------------------------------
650	!	 	if (vv.s.high < 0)
651	!		  vv.ll = -vv.ll;
652	!----------------------------------------------
653.L80:
654	bgez	$r7, .L81     !  if v > 0 , go L81
655	move	V1L, $r6
656	move	V1H, $r7
657	bal	__negdi2
658	move	$r6, V1L
659	move	$r7, V1H
660	!------------------------------------------
661	!		(void) __udivmoddi4 (uu.ll, vv.ll, &w);
662	!		if (c)
663	!		  w = -w;
664	!		return w;
665	!-----------------------------------------
666.L81:
667	move	V2L, $r6
668	move	V2H, $r7
669	move	V1L, $r8
670	move	V1H, $r9
671	addi	$r4, $sp, 0
672	bal	__udivmoddi4
673	lwi	$r0, [$sp+(0)]    ! le: sp + 0 is low, be: sp + 0 is high
674	lwi	$r1, [$sp+(4)]    ! le: sp + 4 is low, be: sp + 4 is high
675	beqz	$r10, .L82
676	bal	__negdi2
677.L82:
678	! epilogue
679#ifdef __NDS32_ISA_V3M__
680	pop25	$r10, 8
681#else
682	addi	$sp, $sp, 8
683	lmw.bim	$r6, [$sp], $r10, 2
684	ret
685#endif
686	.size	__moddi3, .-__moddi3
687#endif /* L_moddi3 */
688
689
690
691#ifdef L_mulsi3
692
693	.text
694	.align	2
695	.globl	__mulsi3
696	.type	__mulsi3, @function
697__mulsi3:
698	! ---------------------------------------------------------------------
699	! r = 0;
700	! while (a)
701	! $r0:       r
702	! $r1:       b
703	! $r2:       a
704	! ---------------------------------------------------------------------
705	beqz	$r0, .L7			! if a == 0, done
706	move	$r2, $r0			! $r2  <- a
707	movi	$r0, 0				! $r0  <- r <- 0
708.L8:
709	! ---------------------------------------------------------------------
710	! {   if (a & 1)
711	!         r += b;
712	!     a >> = 1;
713	!     b << = 1;
714	! }
715	! $r0:       r
716	! $r1:       b
717	! $r2:       a
718	! $r3:       scratch
719	! $r4:       scratch
720	! ---------------------------------------------------------------------
721	andi	$r3, $r2, 1			! $r3  <- a & 1
722	add	$r4, $r0, $r1			! $r4  <- r += b
723	cmovn	$r0, $r4, $r3			! $r0  <- r
724	srli	$r2, $r2, 1			! $r2  <- a >> = 1
725	slli	$r1, $r1, 1			! $r1  <- b << = 1
726	bnez	$r2, .L8			! if a != 0, continue loop
727.L7:
728	! ---------------------------------------------------------------------
729	! $r0:       return code
730	! ---------------------------------------------------------------------
731	ret
732	.size	__mulsi3, .-__mulsi3
733#endif /* L_mulsi3 */
734
735
736
737#ifdef L_udivsi3
738
739	.text
740	.align	2
741	.globl	__udivsi3
742	.type	__udivsi3, @function
743__udivsi3:
744	! ---------------------------------------------------------------------
745	!!res=udivmodsi4(a,b,0);
746	! res=0;
747	! if (den!=0)
748	! ---------------------------------------------------------------------
749	movi	$r2, 0				! $r2  <- res=0
750	beqz	$r1, .L1			! if den==0, skip
751	! ---------------------------------------------------------------------
752	! {   bit=1;
753	! ---------------------------------------------------------------------
754	movi	$r4, 1				! $r4  <- bit=1
755#ifndef __OPTIMIZE_SIZE__
756.L6:
757#endif
758	! ---------------------------------------------------------------------
759	!     while (den<num
760	! ---------------------------------------------------------------------
761	slt	$ta, $r1, $r0			! $ta  <- den<num?
762	beqz	$ta, .L5			! if no, skip
763	! ---------------------------------------------------------------------
764	!          &&bit&&!(den&(1L<<31)))
765	! ---------------------------------------------------------------------
766	bltz	$r1, .L5			! if den<0, skip
767	! ---------------------------------------------------------------------
768	!     {   den<<=1;
769	!         bit<<=1;
770	!     }
771	! ---------------------------------------------------------------------
772#if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
773	clz	$r3, $r1			! $r3  <- leading zero count for den
774	clz	$ta, $r0			! $ta  <- leading zero count for num
775	sub	$r3, $r3, $ta			! $r3  <- number of bits to shift
776	sll	$r1, $r1, $r3			! $r1  <- den
777	sll	$r2, $r2, $r3			! $r2  <- bit
778#else
779	slli	$r1, $r1, 1			! $r1  <- den<<=1
780	slli	$r4, $r4, 1			! $r4  <- bit<<=1
781	b	.L6				! continue loop
782#endif
783.L5:
784	! ---------------------------------------------------------------------
785	!     while (bit)
786	!     {   if (num>=den)
787	! ---------------------------------------------------------------------
788	slt	$ta, $r0, $r1			! $ta  <- num<den?
789	bnez	$ta, .L9			! if yes, skip
790	! ---------------------------------------------------------------------
791	!         {   num-=den;
792	!             res|=bit;
793	!         }
794	! ---------------------------------------------------------------------
795	sub	$r0, $r0, $r1			! $r0  <- num-=den
796	or	$r2, $r2, $r4			! $r2  <- res|=bit
797.L9:
798	! ---------------------------------------------------------------------
799	!         bit>>=1;
800	!         den>>=1;
801	!     }
802	! }
803	!!if (modwanted)
804	!!    return num;
805	!!return res;
806	! ---------------------------------------------------------------------
807	srli	$r4, $r4, 1			! $r4  <- bit>>=1
808	srli	$r1, $r1, 1			! $r1  <- den>>=1
809	bnez	$r4, .L5			! if bit!=0, continue loop
810.L1:
811	! ---------------------------------------------------------------------
812	! return res;
813	! ---------------------------------------------------------------------
814	move	$r0, $r2			! $r0  <- return value
815	! ---------------------------------------------------------------------
816	! ---------------------------------------------------------------------
817	ret
818	.size	__udivsi3, .-__udivsi3
819#endif /* L_udivsi3 */
820
821
822
823#ifdef L_udivdi3
824
825	!--------------------------------------
826	#ifdef __big_endian__
827		#define  V1H  $r0
828		#define  V1L  $r1
829		#define  V2H  $r2
830		#define  V2L  $r3
831	#else
832		#define  V1H  $r1
833		#define  V1L  $r0
834		#define  V2H  $r3
835		#define  V2L  $r2
836	#endif
837	!--------------------------------------
838
839	.text
840	.align	2
841	.globl	__udivdi3
842	.type	__udivdi3, @function
843__udivdi3:
844	! prologue
845#ifdef __NDS32_ISA_V3M__
846	push25	$r8, 0
847#else
848	smw.adm	$r6, [$sp], $r8, 2
849#endif
850	! end of prologue
851	movi	$r4, 0
852	bal	__udivmoddi4
853	! epilogue
854#ifdef __NDS32_ISA_V3M__
855	pop25	$r8, 0
856#else
857	lmw.bim	$r6, [$sp], $r8, 2
858	ret
859#endif
860	.size	__udivdi3, .-__udivdi3
861#endif /* L_udivdi3 */
862
863
864
865#ifdef L_udivmoddi4
866
867	.text
868	.align	2
869	.globl	fudiv_qrnnd
870	.type	fudiv_qrnnd, @function
871	#ifdef __big_endian__
872		#define P1H     $r0
873		#define P1L     $r1
874		#define P2H     $r2
875		#define P2L     $r3
876		#define W6H     $r4
877		#define W6L     $r5
878		#define OFFSET_L 4
879		#define OFFSET_H 0
880	#else
881		#define P1H     $r1
882		#define P1L     $r0
883		#define P2H     $r3
884		#define P2L     $r2
885		#define W6H     $r5
886		#define W6L     $r4
887		#define OFFSET_L 0
888		#define OFFSET_H 4
889	#endif
890fudiv_qrnnd:
891	!------------------------------------------------------
892	! function:  fudiv_qrnnd(quotient, remainder, high_numerator, low_numerator, denominator)
893	!            divides a UDWtype, composed by the UWtype integers,HIGH_NUMERATOR (from $r4)
894	!            and LOW_NUMERATOR(from $r5) by DENOMINATOR(from $r6), and places the quotient
895	!            in $r7 and the remainder in $r8.
896	!------------------------------------------------------
897	!  in reg:$r4(n1), $r5(n0), $r6(d0)
898	!  __d1 = ((USItype) (d) >> ((4 * 8) / 2));
899	!  __d0 = ((USItype) (d) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
900	!  __r1 = (n1) % __d1;
901	!  __q1 = (n1) / __d1;
902	!  __m = (USItype) __q1 * __d0;
903	!  __r1 = __r1 * ((USItype) 1 << ((4 * 8) / 2)) | ((USItype) (n0) >> ((4 * 8) / 2));
904	!   if (__r1 < __m)
905	!    {
906	!------------------------------------------------------
907	smw.adm $r0, [$sp], $r4, 2				! store $lp, when use BASELINE_V1,and must store $r0-$r3
908	srli	$r7, $r6, 16					! $r7 = d1 =__ll_highpart (d)
909	movi	$ta, 65535
910	and	  $r8, $r6, $ta       				! $r8 = d0 = __ll_lowpart (d)
911
912	divr	$r9, $r10, $r4, $r7				! $r9 = q1, $r10 = r1
913	and	  $r4, $r5, $ta       				! $r4 = __ll_lowpart (n0)
914	slli	$r10, $r10, 16      				! $r10 = r1 << 16
915	srli	$ta, $r5, 16        				! $ta = __ll_highpart (n0)
916
917	or	$r10, $r10, $ta					! $r10 <- $r0|$r3=__r1
918	mul	$r5, $r9, $r8					! $r5 = m =  __q1*__d0
919	slt	$ta, $r10, $r5					! $ta <- __r1<__m
920	beqz	$ta, .L2					!if yes,skip
921	!------------------------------------------------------
922	!    __q1--, __r1 += (d);
923	!    if (__r1 >= (d))
924	!     {
925	!------------------------------------------------------
926
927	add	$r10, $r10, $r6					!$r10 <- __r1+d=__r1
928	addi	$r9, $r9, -1					!$r9 <- __q1--=__q1
929	slt	$ta, $r10, $r6					!$ta <- __r1<d
930	bnez	$ta, .L2					!if yes,skip
931	!------------------------------------------------------
932	!       if (__r1 < __m)
933	!        {
934	!------------------------------------------------------
935
936	slt	$ta, $r10, $r5					!$ta <- __r1<__m
937	beqz	$ta, .L2					!if yes,skip
938	!------------------------------------------------------
939	!           __q1--, __r1 += (d);
940	!        }
941	!     }
942	!  }
943	!------------------------------------------------------
944
945	addi	$r9, $r9, -1					!$r9 <- __q1--=__q1
946	add	$r10, $r10, $r6					!$r2 <- __r1+d=__r1
947.L2:
948	!------------------------------------------------------
949	!  __r1 -= __m;
950	!  __r0 = __r1 % __d1;
951	!  __q0 = __r1 / __d1;
952	!  __m = (USItype) __q0 * __d0;
953	!  __r0 = __r0 * ((USItype) 1 << ((4 * 8) / 2)) \
954	!        | ((USItype) (n0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
955	!  if (__r0 < __m)
956	!   {
957	!------------------------------------------------------
958	sub  $r10, $r10, $r5					!$r10 <- __r1-__m=__r1
959	divr	$r7, $r10, $r10, $r7				!$r7 <- r1/__d1=__q0,$r10 <- r1%__d1=__r0
960	slli	$r10, $r10, 16					!$r10 <- __r0<<16
961	mul	$r5, $r8, $r7					!$r5 <- __q0*__d0=__m
962	or	$r10, $r4, $r10					!$r3 <- $r0|__ll_lowpart (n0) =__r0
963	slt	$ta, $r10, $r5					!$ta <- __r0<__m
964	beqz	$ta, .L5					!if yes,skip
965	!------------------------------------------------------
966	!      __q0--, __r0 += (d);
967	!      if (__r0 >= (d))
968	!       {
969	!------------------------------------------------------
970
971	add	$r10, $r10, $r6					!$r10 <- __r0+d=__r0
972	addi	$r7, $r7, -1					!$r7 <- __q0--=__q0
973	slt	$ta, $r10, $r6					!$ta <- __r0<d
974	bnez	$ta, .L5					!if yes,skip
975	!------------------------------------------------------
976	!         if (__r0 < __m)
977	!          {
978	!------------------------------------------------------
979
980	slt	$ta, $r10, $r5					!$ta <- __r0<__m
981	beqz	$ta, .L5					!if yes,skip
982	!------------------------------------------------------
983	!             __q0--, __r0 += (d);
984	!          }
985	!       }
986	!   }
987	!------------------------------------------------------
988
989	add	  $r10, $r10, $r6				!$r3 <- __r0+d=__r0
990	addi	$r7, $r7, -1					!$r2 <- __q0--=__q0
991.L5:
992	!------------------------------------------------------
993	!   __r0 -= __m;
994	!   *q = (USItype) __q1 * ((USItype) 1 << ((4 * 8) / 2)) | __q0;
995	!   *r = __r0;
996	!}
997	!------------------------------------------------------
998
999	sub		$r8, $r10, $r5				!$r8 = r = r0 = __r0-__m
1000	slli	$r9, $r9, 16					!$r9 <- __q1<<16
1001	or	$r7, $r9, $r7					!$r7 = q = $r9|__q0
1002	lmw.bim $r0, [$sp], $r4, 2
1003	ret
1004	.size	fudiv_qrnnd, .-fudiv_qrnnd
1005
1006	.align	2
1007	.globl	__udivmoddi4
1008	.type	__udivmoddi4, @function
1009__udivmoddi4:
1010	! =====================================================================
1011	! stack allocation:
1012	! sp+40 +------------------+
1013	!       | q1               |
1014	! sp+36 +------------------+
1015	!       | q0               |
1016	! sp+32 +------------------+
1017	!       | bm               |
1018	! sp+28 +------------------+
1019	!       | $lp              |
1020	! sp+24 +------------------+
1021	!       | $fp              |
1022	! sp+20 +------------------+
1023	!       | $r6 - $r10       |
1024	! sp    +------------------+
1025	! =====================================================================
1026
1027	addi	$sp, $sp, -40
1028	smw.bi	$r6, [$sp], $r10, 10
1029	!------------------------------------------------------
1030	!  d0 = dd.s.low;
1031	!  d1 = dd.s.high;
1032	!  n0 = nn.s.low;
1033	!  n1 = nn.s.high;
1034	!  if (d1 == 0)
1035	!   {
1036	!------------------------------------------------------
1037
1038	move	$fp, $r4					!$fp <- rp
1039	bnez	P2H, .L9					!if yes,skip
1040	!------------------------------------------------------
1041	!     if (d0 > n1)
1042	!      {
1043	!------------------------------------------------------
1044
1045	slt	$ta, P1H, P2L					!$ta <- n1<d0
1046	beqz	$ta, .L10					!if yes,skip
1047#ifndef __NDS32_PERF_EXT__
1048	smw.adm $r0, [$sp], $r5, 0
1049	move    $r0, P2L
1050	bal __clzsi2
1051	move	$r7, $r0
1052	lmw.bim $r0, [$sp], $r5, 0
1053#else
1054	clz  $r7, P2L
1055#endif
1056	swi     $r7,  [$sp+(28)]
1057	beqz	$r7, .L18					!if yes,skip
1058	!------------------------------------------------------
1059	!         d0 = d0 << bm;
1060	!         n1 = (n1 << bm) | (n0 >> ((4 * 8) - bm));
1061	!         n0 = n0 << bm;
1062	!      }
1063	!------------------------------------------------------
1064
1065	subri	$r5, $r7, 32					!$r5 <- 32-bm
1066	srl	$r5, P1L, $r5					!$r5 <- n0>>$r5
1067	sll	$r6, P1H, $r7					!$r6 <- n1<<bm
1068	or	P1H, $r6, $r5					!P2h <- $r5|$r6=n1
1069	sll	P1L, P1L, $r7					!P1H <- n0<<bm=n0
1070	sll	P2L, P2L, $r7					!P2L <- d0<<bm=d0
1071.L18:
1072	!------------------------------------------------------
1073	!    fudiv_qrnnd (&q0, &n0, n1, n0, d0);
1074	!    q1 = 0;
1075	!  } #if (d0 > n1)
1076	!------------------------------------------------------
1077
1078	move 	$r4,P1H						! give fudiv_qrnnd args
1079	move 	$r5,P1L						!
1080	move 	$r6,P2L						!
1081	bal	fudiv_qrnnd					!calcaulte q0 n0
1082	movi	$r6, 0						!P1L <- 0
1083	swi     $r7,[$sp+32]                                    !q0
1084	swi     $r6,[$sp+36]                                    !q1
1085	move    P1L,$r8						!n0
1086	b	.L19
1087.L10:
1088	!------------------------------------------------------
1089	!  else #if (d0 > n1)
1090	!   {
1091	!     if(d0 == 0)
1092	!------------------------------------------------------
1093
1094	bnez	P2L, .L20					!if yes,skip
1095	!------------------------------------------------------
1096	!      d0 = 1 / d0;
1097	!------------------------------------------------------
1098
1099	movi	$r4, 1						!P1L <- 1
1100	divr	P2L, $r4, $r4, P2L				!$r9=1/d0,P1L=1%d0
1101.L20:
1102
1103#ifndef __NDS32_PERF_EXT__
1104	smw.adm $r0, [$sp], $r5, 0
1105	move    $r0, P2L
1106	bal __clzsi2
1107	move    $r7, $r0
1108	lmw.bim $r0, [$sp], $r5, 0
1109#else
1110	clz  $r7, P2L
1111#endif
1112	swi     $r7,[$sp+(28)]      ! store bm
1113	beqz	$r7, .L28					! if yes,skip
1114	!------------------------------------------------------
1115	!         b = (4 * 8) - bm;
1116	!         d0 = d0 << bm;
1117	!         n2 = n1 >> b;
1118	!         n1 = (n1 << bm) | (n0 >> b);
1119	!         n0 = n0 << bm;
1120	!         fudiv_qrnnd (&q1, &n1, n2, n1, d0);
1121	!    }
1122	!------------------------------------------------------
1123
1124	subri	$r10, $r7, 32					!$r10 <- 32-bm=b
1125	srl	$r4, P1L, $r10					!$r4 <- n0>>b
1126	sll	$r5, P1H, $r7					!$r5 <- n1<<bm
1127	or	$r5, $r5, $r4					!$r5 <- $r5|$r4=n1  !for fun
1128	sll	P2L, P2L, $r7					!P2L <- d0<<bm=d0   !for fun
1129	sll	P1L, P1L, $r7					!P1L <- n0<<bm=n0
1130	srl	$r4, P1H, $r10					!$r4 <- n1>>b=n2    !for fun
1131
1132	move    $r6,P2L                     			!for fun
1133	bal	fudiv_qrnnd					!caculate q1, n1
1134
1135	swi  $r7,[$sp+(36)]          ! q1 store
1136	move P1H,$r8                 ! n1 store
1137
1138	move $r4,$r8	             ! prepare for next fudiv_qrnnd()
1139	move $r5,P1L
1140	move $r6,P2L
1141	b	.L29
1142.L28:
1143	!------------------------------------------------------
1144	!    else // bm != 0
1145	!     {
1146	!        n1 -= d0;
1147	!        q1 = 1;
1148	!
1149	!------------------------------------------------------
1150
1151	sub	P1H, P1H, P2L					!P1L <- n1-d0=n1
1152	movi	$ta, 1						!
1153	swi	$ta, [$sp+(36)]	                                !1 -> [$sp+(36)]
1154
1155	move $r4,P1H						! give fudiv_qrnnd args
1156	move $r5,P1L
1157	move $r6,P2L
1158.L29:
1159	!------------------------------------------------------
1160	!    fudiv_qrnnd (&q0, &n0, n1, n0, d0);
1161	!------------------------------------------------------
1162
1163	bal	fudiv_qrnnd					!calcuate  q0, n0
1164	swi     $r7,[$sp+(32)]  !q0 store
1165	move    P1L,$r8		!n0
1166.L19:
1167	!------------------------------------------------------
1168	!    if (rp != 0)
1169	!     {
1170	!------------------------------------------------------
1171
1172	beqz	$fp, .L31					!if yes,skip
1173	!------------------------------------------------------
1174	!         rr.s.low = n0 >> bm;
1175	!         rr.s.high = 0;
1176	!         *rp = rr.ll;
1177	!     }
1178	!------------------------------------------------------
1179
1180	movi    $r5, 0							!$r5 <- 0
1181	lwi     $r7,[$sp+(28)]    					!load bm
1182	srl	$r4, P1L, $r7     	     				!$r4 <- n0>>bm
1183        swi	$r4, [$fp+OFFSET_L]	  !r0				!$r4 -> [$sp+(48)]
1184	swi	$r5, [$fp+OFFSET_H]	  !r1				!0 -> [$sp+(52)]
1185	b .L31
1186.L9:
1187	!------------------------------------------------------
1188	! else # d1 == 0
1189	!  {
1190	!     if(d1 > n1)
1191	!      {
1192	!------------------------------------------------------
1193
1194	slt	$ta, P1H, P2H					!$ta <- n1<d1
1195	beqz	$ta, .L32					!if yes,skip
1196	!------------------------------------------------------
1197	!         q0 = 0;
1198	!	  q1 = 0;
1199	!         if (rp != 0)
1200	!          {
1201	!------------------------------------------------------
1202
1203	movi	$r5, 0						!$r5 <- 0
1204	swi	$r5, [$sp+(32)]	   !q0				!0 -> [$sp+(40)]=q1
1205	swi	$r5, [$sp+(36)]    !q1				!0 -> [$sp+(32)]=q0
1206	beqz	$fp, .L31					!if yes,skip
1207	!------------------------------------------------------
1208	!             rr.s.low = n0;
1209	!	      rr.s.high = n1;
1210	!             *rp = rr.ll;
1211	!          }
1212	!------------------------------------------------------
1213
1214	swi	P1L, [$fp+OFFSET_L]					!P1L -> [rp]
1215	swi	P1H, [$fp+OFFSET_H]					!P1H -> [rp+4]
1216	b	.L31
1217.L32:
1218#ifndef __NDS32_PERF_EXT__
1219	smw.adm $r0, [$sp], $r5, 0
1220	move    $r0, P2H
1221	bal __clzsi2
1222	move    $r7, $r0
1223	lmw.bim $r0, [$sp], $r5, 0
1224#else
1225	clz  $r7,P2H
1226#endif
1227        swi     $r7,[$sp+(28)] 	                                !$r7=bm  store
1228	beqz	$r7, .L42					!if yes,skip
1229	!------------------------------------------------------
1230	!        USItype m1, m0;
1231	!        b = (4 * 8) - bm;
1232	!        d1 = (d0 >> b) | (d1 << bm);
1233	!        d0 = d0 << bm;
1234	!        n2 = n1 >> b;
1235	!        n1 = (n0 >> b) | (n1 << bm);
1236	!        n0 = n0 << bm;
1237	!        fudiv_qrnnd (&q0, &n1, n2, n1, d1);
1238	!------------------------------------------------------
1239
1240	subri	$r10, $r7, 32					!$r10 <- 32-bm=b
1241	srl	$r5, P2L, $r10					!$r5 <- d0>>b
1242	sll	$r6, P2H, $r7					!$r6 <- d1<<bm
1243	or      $r6, $r5, $r6                                   !$r6 <- $r5|$r6=d1  !! func
1244	move	P2H, $r6 					!P2H <- d1
1245	srl     $r4, P1H, $r10                                  !$r4 <- n1>>b=n2    !!! func
1246	srl	$r8, P1L, $r10					!$r8 <- n0>>b       !!$r8
1247	sll     $r9, P1H, $r7                                   !$r9 <- n1<<bm
1248	or	$r5, $r8, $r9					!$r5 <- $r8|$r9=n1  !func
1249	sll     P2L, P2L, $r7                                   !P2L <- d0<<bm=d0
1250	sll	P1L, P1L, $r7					!P1L <- n0<<bm=n0
1251
1252	bal	fudiv_qrnnd					! cal  q0,n1
1253	swi     $r7,[$sp+(32)]
1254	move    P1H,$r8            ! fudiv_qrnnd (&q0, &n1, n2, n1, d1);
1255        move    $r6, $r7           ! from func
1256
1257	!----------------------------------------------------
1258	!       #umul_ppmm (m1, m0, q0, d0);
1259	!        do
1260	!         {     USItype __x0, __x1, __x2, __x3;
1261	!               USItype __ul, __vl, __uh, __vh;
1262	!               __ul = ((USItype) (q0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
1263	!               __uh = ((USItype) (q0) >> ((4 * 8) / 2));
1264	!               __vl = ((USItype) (d0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
1265	!               __vh = ((USItype) (d0) >> ((4 * 8) / 2));
1266	!               __x0 = (USItype) __ul * __vl;
1267	!               __x1 = (USItype) __ul * __vh;
1268	!               __x2 = (USItype) __uh * __vl;
1269	!               __x3 = (USItype) __uh * __vh;
1270	!               __x1 += ((USItype) (__x0) >> ((4 * 8) / 2));
1271	!               __x1 += __x2;
1272	!               if (__x1 < __x2)
1273	!                  __x3 += ((USItype) 1 << ((4 * 8) / 2));
1274	!               (m1) = __x3 + ((USItype) (__x1) >> ((4 * 8) / 2));
1275	!               (m0) = (USItype)(q0*d0);
1276	!        }
1277	!        if (m1 > n1)
1278	!---------------------------------------------------
1279#ifdef __NDS32_ISA_V3M__
1280        !mulr64  $r4, P2L, $r6
1281	smw.adm $r0, [$sp], $r3, 0
1282	move	P1L, P2L
1283	move	P2L, $r6
1284	movi	P1H, 0
1285	movi	P2H, 0
1286	bal	__muldi3
1287	movd44	$r4, $r0
1288	lmw.bim $r0, [$sp], $r3, 0
1289        move    $r8, W6H
1290        move    $r5, W6L
1291#else
1292        mulr64  $r4, P2L, $r6
1293        move    $r8, W6H
1294        move    $r5, W6L
1295#endif
1296	slt	$ta, P1H, $r8					!$ta <- n1<m1
1297	bnez	$ta, .L46					!if yes,skip
1298	!------------------------------------------------------
1299	!   if(m1 == n1)
1300	!------------------------------------------------------
1301
1302	bne	$r8, P1H, .L45					!if yes,skip
1303	!------------------------------------------------------
1304	!   if(m0 > n0)
1305	!------------------------------------------------------
1306
1307	slt	$ta, P1L, $r5					!$ta <- n0<m0
1308	beqz	$ta, .L45					!if yes,skip
1309.L46:
1310	!------------------------------------------------------
1311	!    {
1312	!       q0--;
1313	!       # sub_ddmmss (m1, m0, m1, m0, d1, d0);
1314	!       do
1315	!        {   USItype __x;
1316	!            __x = (m0) - (d0);
1317	!            (m1) = (m1) - (d1) - (__x > (m0));
1318	!            (m0) = __x;
1319	!        }
1320	!    }
1321	!------------------------------------------------------
1322
1323	sub	$r4, $r5, P2L					!$r4 <- m0-d0=__x
1324	addi	$r6, $r6, -1					!$r6 <- q0--=q0
1325	sub	$r8, $r8, P2H					!$r8 <- m1-d1
1326	swi	$r6, [$sp+(32)]	      ! q0			!$r6->[$sp+(32)]
1327	slt	$ta, $r5, $r4					!$ta <- m0<__x
1328	sub	$r8, $r8, $ta					!$r8 <- P1H-P1L=m1
1329	move	$r5, $r4					!$r5 <- __x=m0
1330.L45:
1331	!------------------------------------------------------
1332	!    q1 = 0;
1333	!    if (rp != 0)
1334	!     {
1335	!------------------------------------------------------
1336
1337	movi	$r4, 0						!$r4 <- 0
1338	swi	$r4, [$sp+(36)]					!0 -> [$sp+(40)]=q1
1339	beqz	$fp, .L31					!if yes,skip
1340	!------------------------------------------------------
1341	!      # sub_ddmmss (n1, n0, n1, n0, m1, m0);
1342	!      do
1343	!       {   USItype __x;
1344	!           __x = (n0) - (m0);
1345	!           (n1) = (n1) - (m1) - (__x > (n0));
1346	!           (n0) = __x;
1347	!       }
1348	!       rr.s.low = (n1 << b) | (n0 >> bm);
1349	!       rr.s.high = n1 >> bm;
1350	!       *rp = rr.ll;
1351	!------------------------------------------------------
1352
1353	sub	$r4, P1H, $r8					!$r4 <- n1-m1
1354	sub	$r6, P1L, $r5					!$r6 <- n0-m0=__x=n0
1355	slt	$ta, P1L, $r6					!$ta <- n0<__x
1356	sub	P1H, $r4, $ta					!P1H <- $r4-$ta=n1
1357	move    P1L, $r6
1358
1359	lwi     $r7,[$sp+(28)]         ! load bm
1360	subri   $r10,$r7,32
1361	sll	$r4, P1H, $r10					!$r4 <- n1<<b
1362	srl	$r5, P1L, $r7					!$r5 <- __x>>bm
1363	or	$r6, $r5, $r4					!$r6 <- $r5|$r4=rr.s.low
1364	srl	$r8, P1H, $r7					!$r8 <- n1>>bm =rr.s.high
1365	swi	$r6, [$fp+OFFSET_L]				!
1366	swi	$r8, [$fp+OFFSET_H]				!
1367	b	.L31
1368.L42:
1369	!------------------------------------------------------
1370	!  else
1371	!   {
1372	!     if(n1 > d1)
1373	!------------------------------------------------------
1374
1375	slt	$ta, P2H, P1H					!$ta <- P2H<P1H
1376	bnez	$ta, .L52					!if yes,skip
1377	!------------------------------------------------------
1378	!     if (n0 >= d0)
1379	!------------------------------------------------------
1380
1381	slt	$ta, P1L, P2L					!$ta <- P1L<P2L
1382	bnez	$ta, .L51					!if yes,skip
1383	!------------------------------------------------------
1384	!        q0 = 1;
1385	!        do
1386	!         {   USItype __x;
1387	!             __x = (n0) - (d0);
1388	!             (n1) = (n1) - (d1) - (__x > (n0));
1389	!             (n0) = __x;
1390	!         }
1391	!------------------------------------------------------
1392.L52:
1393	sub	$r4, P1H, P2H					!$r4 <- P1H-P2H
1394	sub	$r6, P1L, P2L					!$r6 <- no-d0=__x=n0
1395	slt	$ta, P1L, $r6					!$ta <- no<__x
1396	sub	P1H, $r4, $ta					!P1H <- $r4-$ta=n1
1397	move    P1L, $r6					!n0
1398	movi	$r5, 1						!
1399	swi	$r5, [$sp+(32)]					!1 -> [$sp+(32)]=q0
1400	b	.L54
1401.L51:
1402	!------------------------------------------------------
1403	!       q0 = 0;
1404	!------------------------------------------------------
1405
1406	movi    $r5,0
1407	swi	$r5, [$sp+(32)]					!$r5=0 -> [$sp+(32)]
1408.L54:
1409	!------------------------------------------------------
1410	!       q1 = 0;
1411	!       if (rp != 0)
1412	!        {
1413	!------------------------------------------------------
1414
1415	movi	$r5, 0						!
1416	swi	$r5, [$sp+(36)]					!0 -> [$sp+(36)]
1417	beqz	$fp, .L31
1418	!------------------------------------------------------
1419	!          rr.s.low = n0;
1420	!          rr.s.high = n1;
1421	!          *rp = rr.ll;
1422	!        }
1423	!------------------------------------------------------
1424
1425	swi	P1L, [$fp+OFFSET_L]				!remainder
1426	swi	P1H, [$fp+OFFSET_H]				!
1427.L31:
1428	!------------------------------------------------------
1429	! const DWunion ww = {{.low = q0, .high = q1}};
1430	! return ww.ll;
1431	!}
1432	!------------------------------------------------------
1433
1434	lwi	P1L, [$sp+(32)]					!quotient
1435	lwi	P1H, [$sp+(36)]
1436	lmw.bim	$r6, [$sp], $r10, 10
1437	addi	$sp, $sp, 12
1438	ret
1439	.size	__udivmoddi4, .-__udivmoddi4
1440#endif /* L_udivmoddi4 */
1441
1442
1443
1444#ifdef L_umodsi3
1445
1446	! =====================================================================
1447	.text
1448	.align	2
1449	.globl	__umodsi3
1450	.type	__umodsi3, @function
1451__umodsi3:
1452	! ---------------------------------------------------------------------
1453	!!res=udivmodsi4(a,b,1);
1454	! if (den==0)
1455	!     return num;
1456	! ---------------------------------------------------------------------
1457	beqz	$r1, .L1			! if den==0, skip
1458	! ---------------------------------------------------------------------
1459	! bit=1;
1460	! res=0;
1461	! ---------------------------------------------------------------------
1462	movi	$r4, 1				! $r4  <- bit=1
1463#ifndef __OPTIMIZE_SIZE__
1464.L6:
1465#endif
1466	! ---------------------------------------------------------------------
1467	! while (den<num
1468	! ---------------------------------------------------------------------
1469	slt	$ta, $r1, $r0			! $ta  <- den<num?
1470	beqz	$ta, .L5			! if no, skip
1471	! ---------------------------------------------------------------------
1472	!      &&bit&&!(den&(1L<<31)))
1473	! ---------------------------------------------------------------------
1474	bltz	$r1, .L5			! if den<0, skip
1475	! ---------------------------------------------------------------------
1476	! {   den<<=1;
1477	!     bit<<=1;
1478	! }
1479	! ---------------------------------------------------------------------
1480#if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
1481	clz	$r3, $r1			! $r3  <- leading zero count for den
1482	clz	$ta, $r0			! $ta  <- leading zero count for num
1483	sub	$r3, $r3, $ta			! $r3  <- number of bits to shift
1484	sll	$r1, $r1, $r3			! $r1  <- den
1485	sll	$r4, $r4, $r3			! $r2  <- bit
1486#else
1487	slli	$r1, $r1, 1			! $r1  <- den<<=1
1488	slli	$r4, $r4, 1			! $r4  <- bit<<=1
1489	b	.L6				! continue loop
1490#endif
1491.L5:
1492	! ---------------------------------------------------------------------
1493	! while (bit)
1494	! {   if (num>=den)
1495	!     {   num-=den;
1496	!         res|=bit;
1497	!     }
1498	!     bit>>=1;
1499	!     den>>=1;
1500	! }
1501	!!if (modwanted)
1502	!!    return num;
1503	!!return res;
1504	! ---------------------------------------------------------------------
1505	sub	$r2, $r0, $r1			! $r2  <- num-den
1506	slt	$ta, $r0, $r1			! $ta  <- num<den?
1507	srli	$r4, $r4, 1			! $r4  <- bit>>=1
1508	cmovz	$r0, $r2, $ta			! $r0  <- num=(num<den)?num:num-den
1509	srli	$r1, $r1, 1			! $r1  <- den>>=1
1510	bnez	$r4, .L5			! if bit!=0, continue loop
1511.L1:
1512	! ---------------------------------------------------------------------
1513	! return res;
1514	! ---------------------------------------------------------------------
1515	ret
1516	.size	__umodsi3, .-__umodsi3
1517#endif /* L_umodsi3 */
1518
1519
1520
1521#ifdef L_umoddi3
1522
1523	!--------------------------------------
1524	#ifdef __big_endian__
1525		#define  V1H  $r0
1526		#define  V1L  $r1
1527		#define  V2H  $r2
1528		#define  V2L  $r3
1529	#else
1530		#define  V1H  $r1
1531		#define  V1L  $r0
1532		#define  V2H  $r3
1533		#define  V2L  $r2
1534	#endif
1535	!--------------------------------------
1536	.text
1537	.align	2
1538	.globl	__umoddi3
1539	.type	__umoddi3, @function
1540__umoddi3:
1541	! prologue
1542	addi	$sp, $sp, -12
1543	swi $lp, [$sp+(0)]
1544	! end of prologue
1545	addi	$r4, $sp, 4
1546	bal	__udivmoddi4
1547	lwi	$r0, [$sp+(4)]    ! __udivmoddi4 return low when LE mode or return high when BE mode
1548	lwi	$r1, [$sp+(8)]    !
1549.L82:
1550	! epilogue
1551	lwi $lp, [$sp+(0)]
1552	addi	$sp, $sp, 12
1553	ret
1554	.size	__umoddi3, .-__umoddi3
1555#endif /* L_umoddi3 */
1556
1557
1558
1559#ifdef L_muldi3
1560
1561#ifdef __big_endian__
1562	#define P1H	$r0
1563	#define P1L	$r1
1564	#define P2H	$r2
1565	#define P2L	$r3
1566
1567	#define V2H $r4
1568	#define V2L $r5
1569#else
1570	#define P1H	$r1
1571	#define P1L	$r0
1572	#define P2H	$r3
1573	#define P2L	$r2
1574
1575	#define V2H $r5
1576	#define V2L $r4
1577#endif
1578
1579	! ====================================================================
1580	.text
1581	.align	2
1582	.globl	__muldi3
1583	.type	__muldi3, @function
1584__muldi3:
1585	! parameter passing for libgcc functions normally involves 2 doubles
1586	!---------------------------------------
1587#ifdef __NDS32_ISA_V3M__
1588	! There is no mulr64 instruction in Andes ISA V3M.
1589	! So we must provide a sequence of calculations to complete the job.
1590	smw.adm   $r6, [$sp], $r9, 0x0
1591	zeh33	  $r4, P1L
1592	srli      $r7, P1L, 16
1593	zeh33     $r5, P2L
1594	mul       $r6, $r5, $r4
1595	mul33     $r5, $r7
1596	srli      $r8, P2L, 16
1597	mov55     $r9, $r5
1598	maddr32   $r9, $r8, $r4
1599	srli      $r4, $r6, 16
1600	add       $r4, $r9, $r4
1601	slt45     $r4, $r5
1602	slli      $r5, $r15, 16
1603	maddr32   $r5, $r8, $r7
1604	mul       P2L, P1H, P2L
1605	srli      $r7, $r4, 16
1606	maddr32   P2L, P2H, P1L
1607	add333    P1H, $r5, $r7
1608	slli      $r4, $r4, 16
1609	zeh33     $r6, $r6
1610	add333    P1L, $r4, $r6
1611	add333    P1H, P2L, P1H
1612	lmw.bim   $r6, [$sp], $r9, 0x0
1613	ret
1614#else /* not  __NDS32_ISA_V3M__ */
1615	mul	    $ta, P1L, P2H
1616	mulr64	$r4, P1L, P2L
1617	maddr32	$ta, P1H, P2L
1618	move	  P1L, V2L
1619	add	    P1H, $ta, V2H
1620	ret
1621#endif /* not __NDS32_ISA_V3M__ */
1622	.size	__muldi3, .-__muldi3
1623#endif /* L_muldi3 */
1624
1625
1626
1627#ifdef L_addsub_df
1628
1629#ifndef __big_endian__
1630	#define P1L     $r0
1631	#define P1H     $r1
1632	#define P2L     $r2
1633	#define P2H     $r3
1634	#define P3L     $r4
1635	#define P3H     $r5
1636	#define O1L     $r7
1637	#define O1H	$r8
1638#else
1639	#define P1H     $r0
1640	#define P1L     $r1
1641	#define P2H     $r2
1642	#define P2L     $r3
1643	#define P3H     $r4
1644	#define P3L     $r5
1645	#define O1H     $r7
1646	#define O1L	$r8
1647#endif
1648	.text
1649	.align	2
1650	.global  __subdf3
1651	.type    __subdf3, @function
1652__subdf3:
1653	push    $lp
1654	pushm   $r6, $r10
1655
1656	move    $r4, #0x80000000
1657	xor     P2H, P2H, $r4
1658
1659	j       .Lsdpadd
1660
1661	.global  __adddf3
1662	.type    __adddf3, @function
1663__adddf3:
1664	push    $lp
1665	pushm   $r6, $r10
1666.Lsdpadd:
1667	slli    $r6, P1H, #1
1668	srli    $r6, $r6, #21
1669	slli    P3H, P1H, #11
1670	srli    $r10, P1L, #21
1671	or      P3H, P3H, $r10
1672	slli    P3L, P1L, #11
1673	move    O1L, #0x80000000
1674	or      P3H, P3H, O1L
1675	slli    $r9, P2H, #1
1676	srli    $r9, $r9, #21
1677	slli    O1H, P2H, #11
1678	srli    $r10, P2L, #21
1679	or      O1H, O1H, $r10
1680	or      O1H, O1H, O1L
1681	slli    O1L, P2L, #11
1682
1683	addi    $r10, $r6, #-1
1684	slti    $r15, $r10, #0x7fe
1685	beqzs8  .LEspecA
1686
1687.LElab1:
1688	addi    $r10, $r9, #-1
1689	slti    $r15, $r10, #0x7fe
1690	beqzs8  .LEspecB
1691
1692.LElab2:
1693	#NORMd($r4, P2L, P1L)
1694	bnez    P3H, .LL1
1695	bnez    P3L, .LL2
1696	move    $r6, #0
1697	j       .LL3
1698.LL2:
1699	move    P3H, P3L
1700	move    P3L, #0
1701	move    P2L, #32
1702	sub     $r6, $r6, P2L
1703.LL1:
1704#ifndef __big_endian__
1705#ifdef __NDS32_PERF_EXT__
1706	clz	$r2, $r5
1707#else
1708	pushm	$r0, $r1
1709	pushm	$r3, $r5
1710	move	$r0, $r5
1711	bal	__clzsi2
1712	move	$r2, $r0
1713	popm	$r3, $r5
1714	popm	$r0, $r1
1715#endif
1716#else /* __big_endian__ */
1717#ifdef __NDS32_PERF_EXT__
1718	clz	$r3, $r4
1719#else
1720	pushm	$r0, $r2
1721	pushm	$r4, $r5
1722	move	$r0, $r4
1723	bal	__clzsi2
1724	move	$r3, $r0
1725	popm	$r4, $r5
1726	popm	$r0, $r2
1727#endif
1728#endif /* __big_endian__ */
1729	beqz    P2L, .LL3
1730	sub     $r6, $r6, P2L
1731	subri   P1L, P2L, #32
1732	srl     P1L, P3L, P1L
1733	sll     P3L, P3L, P2L
1734	sll     P3H, P3H, P2L
1735	or      P3H, P3H, P1L
1736.LL3:
1737	#NORMd End
1738
1739	#NORMd($r7, P2L, P1L)
1740	bnez    O1H, .LL4
1741	bnez    O1L, .LL5
1742	move    $r9, #0
1743	j       .LL6
1744.LL5:
1745	move    O1H, O1L
1746	move    O1L, #0
1747	move    P2L, #32
1748	sub     $r9, $r9, P2L
1749.LL4:
1750#ifndef __big_endian__
1751#ifdef __NDS32_PERF_EXT__
1752	clz	$r2, O1H
1753#else
1754	pushm	$r0, $r1
1755	pushm	$r3, $r5
1756	move	$r0, O1H
1757	bal	__clzsi2
1758	move	$r2, $r0
1759	popm	$r3, $r5
1760	popm	$r0, $r1
1761#endif
1762#else /* __big_endian__ */
1763#ifdef __NDS32_PERF_EXT__
1764	clz	$r3, O1H
1765#else
1766	pushm	$r0, $r2
1767	pushm	$r4, $r5
1768	move	$r0, O1H
1769	bal	__clzsi2
1770	move	$r3, $r0
1771	popm	$r4, $r5
1772	popm	$r0, $r2
1773#endif
1774#endif /* __big_endian__ */
1775	beqz    P2L, .LL6
1776	sub     $r9, $r9, P2L
1777	subri   P1L, P2L, #32
1778	srl     P1L, O1L, P1L
1779	sll     O1L, O1L, P2L
1780	sll     O1H, O1H, P2L
1781	or      O1H, O1H, P1L
1782.LL6:
1783	#NORMd End
1784
1785	move    $r10, #0x80000000
1786	and     P1H, P1H, $r10
1787
1788	beq     $r6, $r9, .LEadd3
1789	slts    $r15, $r9, $r6
1790	beqzs8  .Li1
1791	sub     $r9, $r6, $r9
1792	move    P2L, #0
1793.LL7:
1794	move    $r10, #0x20
1795	slt     $r15, $r9, $r10
1796	bnezs8  .LL8
1797	or      P2L, P2L, O1L
1798	move    O1L, O1H
1799	move    O1H, #0
1800	addi    $r9, $r9, #0xffffffe0
1801	bnez    O1L, .LL7
1802.LL8:
1803	beqz    $r9, .LEadd3
1804	move    P1L, O1H
1805	move    $r10, O1L
1806	srl     O1L, O1L, $r9
1807	srl     O1H, O1H, $r9
1808	subri   $r9, $r9, #0x20
1809	sll     P1L, P1L, $r9
1810	or      O1L, O1L, P1L
1811	sll     $r10, $r10, $r9
1812	or      P2L, P2L, $r10
1813	beqz    P2L, .LEadd3
1814	ori     O1L, O1L, #1
1815	j       .LEadd3
1816.Li1:
1817	move    $r15, $r6
1818	move    $r6, $r9
1819	sub     $r9, $r9, $r15
1820	move    P2L, #0
1821.LL10:
1822	move    $r10, #0x20
1823	slt     $r15, $r9, $r10
1824	bnezs8  .LL11
1825	or      P2L, P2L, P3L
1826	move    P3L, P3H
1827	move    P3H, #0
1828	addi    $r9, $r9, #0xffffffe0
1829	bnez    P3L, .LL10
1830.LL11:
1831	beqz    $r9, .LEadd3
1832	move    P1L, P3H
1833	move    $r10, P3L
1834	srl     P3L, P3L, $r9
1835	srl     P3H, P3H, $r9
1836	subri   $r9, $r9, #0x20
1837	sll     P1L, P1L, $r9
1838	or      P3L, P3L, P1L
1839	sll     $r10, $r10, $r9
1840	or      P2L, P2L, $r10
1841	beqz    P2L, .LEadd3
1842	ori     P3L, P3L, #1
1843
1844.LEadd3:
1845	xor     $r10, P1H, P2H
1846	sltsi   $r15, $r10, #0
1847	bnezs8  .LEsub1
1848
1849	#ADD(P3L, O1L)
1850	add     P3L, P3L, O1L
1851	slt     $r15, P3L, O1L
1852
1853	#ADDCC(P3H, O1H)
1854	beqzs8  .LL13
1855	add     P3H, P3H, O1H
1856	slt     $r15, P3H, O1H
1857	beqzs8  .LL14
1858	addi    P3H, P3H, #0x1
1859	j       .LL15
1860.LL14:
1861	move    $r15, #1
1862	add     P3H, P3H, $r15
1863	slt     $r15, P3H, $r15
1864	j       .LL15
1865.LL13:
1866	add     P3H, P3H, O1H
1867	slt     $r15, P3H, O1H
1868.LL15:
1869
1870	beqzs8  .LEres
1871	andi    $r10, P3L, #1
1872	beqz    $r10, .Li3
1873	ori     P3L, P3L, #2
1874.Li3:
1875	srli    P3L, P3L, #1
1876	slli    $r10, P3H, #31
1877	or      P3L, P3L, $r10
1878	srli    P3H, P3H, #1
1879	move    $r10, #0x80000000
1880	or      P3H, P3H, $r10
1881	addi    $r6, $r6, #1
1882	subri   $r15, $r6, #0x7ff
1883	bnezs8  .LEres
1884	move    $r10, #0x7ff00000
1885	or      P1H, P1H, $r10
1886	move    P1L, #0
1887	j       .LEretA
1888
1889.LEsub1:
1890	#SUB(P3L, O1L)
1891	move    $r15, P3L
1892	sub     P3L, P3L, O1L
1893	slt     $r15, $r15, P3L
1894
1895	#SUBCC(P3H, O1H)
1896	beqzs8  .LL16
1897	move    $r15, P3H
1898	sub     P3H, P3H, O1H
1899	slt     $r15, $r15, P3H
1900	beqzs8  .LL17
1901	subi333 P3H, P3H, #1
1902	j       .LL18
1903.LL17:
1904	move    $r15, P3H
1905	subi333 P3H, P3H, #1
1906	slt     $r15, $r15, P3H
1907	j       .LL18
1908.LL16:
1909	move    $r15, P3H
1910	sub     P3H, P3H, O1H
1911	slt     $r15, $r15, P3H
1912.LL18:
1913
1914	beqzs8  .Li5
1915	move    $r10, #0x80000000
1916	xor     P1H, P1H, $r10
1917
1918	subri   P3H, P3H, #0
1919	beqz    P3L, .LL19
1920	subri   P3L, P3L, #0
1921	subi45  P3H, #1
1922.LL19:
1923
1924.Li5:
1925	#NORMd($r4, $r9, P1L)
1926	bnez    P3H, .LL20
1927	bnez    P3L, .LL21
1928	move    $r6, #0
1929	j       .LL22
1930.LL21:
1931	move    P3H, P3L
1932	move    P3L, #0
1933	move    $r9, #32
1934	sub     $r6, $r6, $r9
1935.LL20:
1936#ifdef __NDS32_PERF_EXT__
1937	clz	$r9, P3H
1938#else
1939	pushm	$r0, $r5
1940	move	$r0, P3H
1941	bal	__clzsi2
1942	move	$r9, $r0
1943	popm	$r0, $r5
1944#endif
1945	beqz    $r9, .LL22
1946	sub     $r6, $r6, $r9
1947	subri   P1L, $r9, #32
1948	srl     P1L, P3L, P1L
1949	sll     P3L, P3L, $r9
1950	sll     P3H, P3H, $r9
1951	or      P3H, P3H, P1L
1952.LL22:
1953	#NORMd End
1954
1955	or      $r10, P3H, P3L
1956	bnez    $r10, .LEres
1957	move    P1H, #0
1958
1959.LEres:
1960	blez    $r6, .LEund
1961
1962.LElab8:
1963	#ADD(P3L, $0x400)
1964	move    $r15, #0x400
1965	add     P3L, P3L, $r15
1966	slt     $r15, P3L, $r15
1967
1968	#ADDCC(P3H, $0x0)
1969	beqzs8  .LL25
1970	add     P3H, P3H, $r15
1971	slt     $r15, P3H, $r15
1972.LL25:
1973
1974	#ADDC($r6, $0x0)
1975	add     $r6, $r6, $r15
1976	srli    $r10, P3L, #11
1977	andi    $r10, $r10, #1
1978	sub     P3L, P3L, $r10
1979	srli    P1L, P3L, #11
1980	slli    $r10, P3H, #21
1981	or      P1L, P1L, $r10
1982	slli    $r10, P3H, #1
1983	srli    $r10, $r10, #12
1984	or      P1H, P1H, $r10
1985	slli    $r10, $r6, #20
1986	or      P1H, P1H, $r10
1987
1988.LEretA:
1989.LE999:
1990	popm    $r6, $r10
1991	pop     $lp
1992	ret5    $lp
1993
1994.LEspecA:
1995	#ADD(P3L, P3L)
1996	move    $r15, P3L
1997	add     P3L, P3L, P3L
1998	slt     $r15, P3L, $r15
1999
2000	#ADDC(P3H, P3H)
2001	add     P3H, P3H, P3H
2002	add     P3H, P3H, $r15
2003	bnez    $r6, .Li7
2004	or      $r10, P3H, P3L
2005	beqz    $r10, .Li8
2006	j       .LElab1
2007.Li8:
2008	subri   $r15, $r9, #0x7ff
2009	beqzs8  .LEspecB
2010	add     P3L, P2H, P2H
2011	or      $r10, P3L, P2L
2012	bnez    $r10, .LEretB
2013	sltsi   $r15, P2H, #0
2014	bnezs8  .LEretA
2015
2016.LEretB:
2017	move    P1L, P2L
2018	move    P1H, P2H
2019	j       .LE999
2020.Li7:
2021	or      $r10, P3H, P3L
2022	bnez    $r10, .LEnan
2023	subri   $r15, $r9, #0x7ff
2024	bnezs8  .LEretA
2025	xor     $r10, P1H, P2H
2026	sltsi   $r15, $r10, #0
2027	bnezs8  .LEnan
2028	j       .LEretB
2029
2030.LEspecB:
2031	#ADD(O1L, O1L)
2032	move    $r15, O1L
2033	add     O1L, O1L, O1L
2034	slt     $r15, O1L, $r15
2035
2036	#ADDC(O1H, O1H)
2037	add     O1H, O1H, O1H
2038	add     O1H, O1H, $r15
2039	bnez    $r9, .Li11
2040	or      $r10, O1H, O1L
2041	beqz    $r10, .LEretA
2042	j       .LElab2
2043.Li11:
2044	or      $r10, O1H, O1L
2045	beqz    $r10, .LEretB
2046
2047.LEnan:
2048	move    P1H, #0xfff80000
2049	move    P1L, #0
2050	j       .LEretA
2051
2052.LEund:
2053	subri   $r9, $r6, #1
2054	move    P2L, #0
2055.LL26:
2056	move    $r10, #0x20
2057	slt     $r15, $r9, $r10
2058	bnezs8  .LL27
2059	or      P2L, P2L, P3L
2060	move    P3L, P3H
2061	move    P3H, #0
2062	addi    $r9, $r9, #0xffffffe0
2063	bnez    P3L, .LL26
2064.LL27:
2065	beqz    $r9, .LL28
2066	move    P1L, P3H
2067	move    $r10, P3L
2068	srl     P3L, P3L, $r9
2069	srl     P3H, P3H, $r9
2070	subri   $r9, $r9, #0x20
2071	sll     P1L, P1L, $r9
2072	or      P3L, P3L, P1L
2073	sll     $r10, $r10, $r9
2074	or      P2L, P2L, $r10
2075	beqz    P2L, .LL28
2076	ori     P3L, P3L, #1
2077.LL28:
2078	move    $r6, #0
2079	j       .LElab8
2080	.size   __subdf3, .-__subdf3
2081	.size   __adddf3, .-__adddf3
2082#endif /* L_addsub_df */
2083
2084
2085
2086#ifdef L_mul_sf
2087
2088#if !defined (__big_endian__)
2089	#define P1L     $r0
2090	#define P1H     $r1
2091	#define P2L     $r2
2092	#define P2H     $r3
2093#else
2094	#define P1H     $r0
2095	#define P1L     $r1
2096	#define P2H     $r2
2097	#define P2L     $r3
2098#endif
2099	.text
2100	.align	2
2101	.global	__mulsf3
2102	.type	__mulsf3, @function
2103__mulsf3:
2104	push    $lp
2105	pushm   $r6, $r10
2106
2107	srli    $r3, $r0, #23
2108	andi    $r3, $r3, #0xff
2109	srli    $r5, $r1, #23
2110	andi    $r5, $r5, #0xff
2111	move    $r6, #0x80000000
2112	slli    $r2, $r0, #8
2113	or      $r2, $r2, $r6
2114	slli    $r4, $r1, #8
2115	or      $r4, $r4, $r6
2116	xor     $r8, $r0, $r1
2117	and     $r6, $r6, $r8
2118
2119	addi    $r8, $r3, #-1
2120	slti    $r15, $r8, #0xfe
2121	beqzs8  .LFspecA
2122
2123.LFlab1:
2124	addi    $r8, $r5, #-1
2125	slti    $r15, $r8, #0xfe
2126	beqzs8  .LFspecB
2127
2128.LFlab2:
2129	move    $r10, $r3
2130/* This is a 64-bit multiple. ($r2, $r7) is (high, low). */
2131#ifndef __NDS32_ISA_V3M__
2132	mulr64	$r2, $r2, $r4
2133#else
2134	pushm	$r0, $r1
2135	pushm	$r4, $r5
2136	move	P1L, $r2
2137	movi	P1H, #0
2138	move	P2L, $r4
2139	movi	P2H, #0
2140	bal	__muldi3
2141	movd44	$r2, $r0
2142	popm	$r4, $r5
2143	popm	$r0, $r1
2144#endif
2145#ifndef __big_endian__
2146	move    $r7, $r2
2147	move    $r2, $r3
2148#else
2149	move	$r7, $r3
2150#endif
2151	move    $r3, $r10
2152
2153	beqz    $r7, .Li17
2154	ori     $r2, $r2, #1
2155
2156.Li17:
2157	sltsi   $r15, $r2, #0
2158	bnezs8  .Li18
2159	slli    $r2, $r2, #1
2160	addi    $r3, $r3, #-1
2161.Li18:
2162	addi    $r8, $r5, #0xffffff82
2163	add     $r3, $r3, $r8
2164	addi    $r8, $r3, #-1
2165	slti    $r15, $r8, #0xfe
2166	beqzs8  .LFoveund
2167
2168.LFlab8:
2169	#ADD($r2, $0x80)
2170	move    $r15, #0x80
2171	add     $r2, $r2, $r15
2172	slt     $r15, $r2, $r15
2173
2174	#ADDC($r3, $0x0)
2175	add     $r3, $r3, $r15
2176	srli    $r8, $r2, #8
2177	andi    $r8, $r8, #1
2178	sub     $r2, $r2, $r8
2179	slli    $r2, $r2, #1
2180	srli    $r2, $r2, #9
2181	slli    $r8, $r3, #23
2182	or      $r2, $r2, $r8
2183	or      $r0, $r2, $r6
2184
2185.LF999:
2186	popm    $r6, $r10
2187	pop     $lp
2188	ret5    $lp
2189
2190.LFspecA:
2191	bnez    $r3, .Li19
2192	add     $r2, $r2, $r2
2193	beqz    $r2, .Li20
2194#ifdef __NDS32_PERF_EXT__
2195	clz	$r7, $r2
2196#else
2197	pushm	$r0, $r5
2198	move	$r0, $r2
2199	bal	__clzsi2
2200	move	$r7, $r0
2201	popm	$r0, $r5
2202#endif
2203	sub     $r3, $r3, $r7
2204	sll     $r2, $r2, $r7
2205	j       .LFlab1
2206.Li20:
2207	subri   $r15, $r5, #0xff
2208	beqzs8  .LFnan
2209	j       .LFzer
2210.Li19:
2211	add     $r8, $r2, $r2
2212	bnez    $r8, .LFnan
2213	bnez    $r5, .Li21
2214	add     $r8, $r4, $r4
2215	beqz    $r8, .LFnan
2216.Li21:
2217	subri   $r15, $r5, #0xff
2218	bnezs8  .LFinf
2219
2220.LFspecB:
2221	bnez    $r5, .Li22
2222	add     $r4, $r4, $r4
2223	beqz    $r4, .LFzer
2224#ifdef __NDS32_PERF_EXT__
2225	clz	$r7, $r4
2226#else
2227	pushm	$r0, $r5
2228	move	$r0, $r4
2229	bal	__clzsi2
2230	move	$r7, $r0
2231	popm	$r0, $r5
2232#endif
2233	sub     $r5, $r5, $r7
2234	sll     $r4, $r4, $r7
2235	j       .LFlab2
2236
2237.LFzer:
2238	move    $r0, $r6
2239	j       .LF999
2240.Li22:
2241	add     $r8, $r4, $r4
2242	bnez    $r8, .LFnan
2243
2244.LFinf:
2245	move    $r8, #0x7f800000
2246	or      $r0, $r6, $r8
2247	j       .LF999
2248
2249.LFnan:
2250	move    $r0, #0xffc00000
2251	j       .LF999
2252
2253.LFoveund:
2254	bgtz    $r3, .LFinf
2255	subri   $r7, $r3, #1
2256	slti    $r15, $r7, #0x20
2257	beqzs8  .LFzer
2258	subri   $r8, $r7, #0x20
2259	sll     $r3, $r2, $r8
2260	srl     $r2, $r2, $r7
2261	beqz    $r3, .Li25
2262	ori     $r2, $r2, #2
2263.Li25:
2264	move    $r3, #0
2265	addi    $r8, $r2, #0x80
2266	sltsi   $r15, $r8, #0
2267	beqzs8  .LFlab8
2268	move    $r3, #1
2269	j       .LFlab8
2270	.size	__mulsf3, .-__mulsf3
2271#endif /* L_mul_sf */
2272
2273
2274
2275#ifdef L_mul_df
2276
2277#ifndef __big_endian__
2278	#define P1L     $r0
2279	#define P1H     $r1
2280	#define P2L     $r2
2281	#define P2H     $r3
2282	#define P3L     $r4
2283	#define P3H     $r5
2284	#define O1L     $r7
2285	#define O1H	$r8
2286#else
2287	#define P1H     $r0
2288	#define P1L     $r1
2289	#define P2H     $r2
2290	#define P2L     $r3
2291	#define P3H     $r4
2292	#define P3L     $r5
2293	#define O1H     $r7
2294	#define O1L	$r8
2295#endif
2296	.text
2297	.align	2
2298	.global	__muldf3
2299	.type	__muldf3, @function
2300__muldf3:
2301	push    $lp
2302	pushm   $r6, $r10
2303
2304	slli    $r6, P1H, #1
2305	srli    $r6, $r6, #21
2306	slli    P3H, P1H, #11
2307	srli    $r10, P1L, #21
2308	or      P3H, P3H, $r10
2309	slli    P3L, P1L, #11
2310	move    O1L, #0x80000000
2311	or      P3H, P3H, O1L
2312	slli    $r9, P2H, #1
2313	srli    $r9, $r9, #21
2314	slli    O1H, P2H, #11
2315	srli    $r10, P2L, #21
2316	or      O1H, O1H, $r10
2317	or      O1H, O1H, O1L
2318	xor     P1H, P1H, P2H
2319	and     P1H, P1H, O1L
2320	slli    O1L, P2L, #11
2321
2322	addi    $r10, $r6, #-1
2323	slti    $r15, $r10, #0x7fe
2324	beqzs8  .LFspecA
2325
2326.LFlab1:
2327	addi    $r10, $r9, #-1
2328	slti    $r15, $r10, #0x7fe
2329	beqzs8  .LFspecB
2330
2331.LFlab2:
2332	addi    $r10, $r9, #0xfffffc02
2333	add     $r6, $r6, $r10
2334
2335	move    $r10, $r8
2336/* This is a 64-bit multiple. */
2337#ifndef __big_endian__
2338/* For little endian: ($r9, $r3) is (high, low). */
2339#ifndef __NDS32_ISA_V3M__
2340	mulr64	$r8, $r5, $r8
2341#else
2342	pushm	$r0, $r5
2343	move	$r0, $r5
2344	movi	$r1, #0
2345	move	$r2, $r8
2346	movi	$r3, #0
2347	bal	__muldi3
2348	movd44	$r8, $r0
2349	popm	$r0, $r5
2350#endif
2351	move    $r3, $r8
2352#else /* __big_endian__ */
2353/* For big endain: ($r9, $r2) is (high, low). */
2354#ifndef __NDS32_ISA_V3M__
2355	mulr64	$r8, $r4, $r7
2356#else
2357	pushm	$r0, $r5
2358	move	$r1, $r4
2359	movi	$r0, #0
2360	move	$r3, $r7
2361	movi	$r2, #0
2362	bal	__muldi3
2363	movd44	$r8, $r0
2364	popm	$r0, $r5
2365#endif
2366	move    $r2, $r9
2367	move    $r9, $r8
2368#endif /* __big_endian__ */
2369	move    $r8, $r10
2370
2371	move    $r10, P1H
2372/* This is a 64-bit multiple. */
2373#ifndef __big_endian__
2374/* For little endian: ($r0, $r2) is (high, low). */
2375#ifndef __NDS32_ISA_V3M__
2376	mulr64	$r0, $r4, $r8
2377#else
2378	pushm	$r2, $r5
2379	move	$r0, $r4
2380	movi	$r1, #0
2381	move	$r2, $r8
2382	movi	$r3, #0
2383	bal	__muldi3
2384	popm	$r2, $r5
2385#endif
2386	move    $r2, $r0
2387	move    $r0, $r1
2388#else /* __big_endian__ */
2389/* For big endain: ($r1, $r3) is (high, low). */
2390#ifndef __NDS32_ISA_V3M__
2391	mulr64	$r0, $r5, $r7
2392#else
2393	pushm	$r2, $r5
2394	move	$r1, $r5
2395	movi	$r0, #0
2396	move	$r3, $r7
2397	movi	$r2, #0
2398	bal	__muldi3
2399	popm	$r2, $r5
2400#endif
2401	move    $r3, $r1
2402	move    $r1, $r0
2403#endif /* __big_endian__ */
2404	move    P1H, $r10
2405
2406	#ADD(P2H, P1L)
2407	add     P2H, P2H, P1L
2408	slt     $r15, P2H, P1L
2409
2410	#ADDC($r9, $0x0)
2411	add     $r9, $r9, $r15
2412
2413	move    $r10, P1H
2414/* This is a 64-bit multiple. */
2415#ifndef __big_endian__
2416/* For little endian: ($r0, $r8) is (high, low). */
2417#ifndef __NDS32_ISA_V3M__
2418	mulr64	$r0, $r5, $r7
2419#else
2420	pushm	$r2, $r5
2421	move	$r0, $r5
2422	movi	$r1, #0
2423	move	$r2, $r7
2424	movi	$r3, #0
2425	bal	__muldi3
2426	popm	$r2, $r5
2427#endif
2428	move    $r8, $r0
2429	move    $r0, $r1
2430#else /* __big_endian__ */
2431/* For big endian: ($r1, $r7) is (high, low). */
2432#ifndef __NDS32_ISA_V3M__
2433	mulr64	$r0, $r4, $r8
2434#else
2435	pushm	$r2, $r5
2436	move	$r1, $r4
2437	movi	$r0, #0
2438	move	$r3, $r8
2439	movi	$r2, #0
2440	bal	__muldi3
2441	popm	$r2, $r5
2442#endif
2443	move	$r7, $r1
2444	move	$r1, $r0
2445#endif /* __big_endian__ */
2446	move    P1H, $r10
2447
2448	#ADD(P2L, O1H)
2449	add     P2L, P2L, O1H
2450	slt     $r15, P2L, O1H
2451
2452
2453	#ADDCC(P2H, P1L)
2454	beqzs8  .LL29
2455	add     P2H, P2H, P1L
2456	slt     $r15, P2H, P1L
2457	beqzs8  .LL30
2458	addi    P2H, P2H, #0x1
2459	j       .LL31
2460.LL30:
2461	move    $r15, #1
2462	add     P2H, P2H, $r15
2463	slt     $r15, P2H, $r15
2464	j       .LL31
2465.LL29:
2466	add     P2H, P2H, P1L
2467	slt     $r15, P2H, P1L
2468.LL31:
2469
2470	#ADDC($r9, $0x0)
2471	add     $r9, $r9, $r15
2472
2473/* This is a 64-bit multiple. */
2474#ifndef __big_endian__
2475/* For little endian: ($r8, $r0) is (high, low). */
2476	move    $r10, $r9
2477#ifndef __NDS32_ISA_V3M__
2478	mulr64	$r8, $r4, $r7
2479#else
2480	pushm	$r0, $r5
2481	move	$r0, $r4
2482	movi	$r1, #0
2483	move	$r2, $r7
2484	movi	$r3, #0
2485	bal	__muldi3
2486	movd44	$r8, $r0
2487	popm	$r0, $r5
2488#endif
2489	move    $r0, $r8
2490	move    $r8, $r9
2491	move    $r9, $r10
2492#else /* __big_endian__ */
2493/* For big endian: ($r7, $r1) is (high, low). */
2494	move	$r10, $r6
2495#ifndef __NDS32_ISA_V3M__
2496	mulr64	$r6, $r5, $r8
2497#else
2498	pushm	$r0, $r5
2499	move	$r1, $r5
2500	movi	$r0, #0
2501	move	$r3, $r8
2502	movi	$r2, #0
2503	bal	__muldi3
2504	movd44	$r6, $r0
2505	popm	$r0, $r5
2506#endif
2507	move	$r1, $r7
2508	move	$r7, $r6
2509	move	$r6, $r10
2510#endif /* __big_endian__ */
2511
2512	#ADD(P2L, O1H)
2513	add     P2L, P2L, O1H
2514	slt     $r15, P2L, O1H
2515
2516
2517	#ADDCC(P2H, $0x0)
2518	beqzs8  .LL34
2519	add     P2H, P2H, $r15
2520	slt     $r15, P2H, $r15
2521.LL34:
2522
2523	#ADDC($r9, $0x0)
2524	add     $r9, $r9, $r15
2525	or      $r10, P1L, P2L
2526	beqz    $r10, .Li13
2527	ori     P2H, P2H, #1
2528.Li13:
2529	move    P3H, $r9
2530	move    P3L, P2H
2531	sltsi   $r15, P3H, #0
2532	bnezs8  .Li14
2533
2534	move    $r15, P3L
2535	add     P3L, P3L, P3L
2536	slt     $r15, P3L, $r15
2537	add     P3H, P3H, P3H
2538	add     P3H, P3H, $r15
2539	addi    $r6, $r6, #-1
2540.Li14:
2541	addi    $r10, $r6, #-1
2542	slti    $r15, $r10, #0x7fe
2543	beqzs8  .LFoveund
2544
2545	#ADD(P3L, $0x400)
2546	move    $r15, #0x400
2547	add     P3L, P3L, $r15
2548	slt     $r15, P3L, $r15
2549
2550
2551	#ADDCC(P3H, $0x0)
2552	beqzs8  .LL37
2553	add     P3H, P3H, $r15
2554	slt     $r15, P3H, $r15
2555.LL37:
2556
2557	#ADDC($r6, $0x0)
2558	add     $r6, $r6, $r15
2559
2560.LFlab8:
2561	srli    $r10, P3L, #11
2562	andi    $r10, $r10, #1
2563	sub     P3L, P3L, $r10
2564	srli    P1L, P3L, #11
2565	slli    $r10, P3H, #21
2566	or      P1L, P1L, $r10
2567	slli    $r10, P3H, #1
2568	srli    $r10, $r10, #12
2569	or      P1H, P1H, $r10
2570	slli    $r10, $r6, #20
2571	or      P1H, P1H, $r10
2572
2573.LFret:
2574.LF999:
2575	popm    $r6, $r10
2576	pop     $lp
2577	ret5    $lp
2578
2579.LFspecA:
2580	#ADD(P3L, P3L)
2581	move    $r15, P3L
2582	add     P3L, P3L, P3L
2583	slt     $r15, P3L, $r15
2584
2585	#ADDC(P3H, P3H)
2586	add     P3H, P3H, P3H
2587	add     P3H, P3H, $r15
2588	bnez    $r6, .Li15
2589	or      $r10, P3H, P3L
2590	beqz    $r10, .Li16
2591
2592
2593	#NORMd($r4, P1L, P2H)
2594	bnez    P3H, .LL38
2595	bnez    P3L, .LL39
2596	move    $r6, #0
2597	j       .LL40
2598.LL39:
2599	move    P3H, P3L
2600	move    P3L, #0
2601	move    P1L, #32
2602	sub     $r6, $r6, P1L
2603.LL38:
2604#ifndef __big_endian__
2605#ifdef __NDS32_PERF_EXT__
2606	clz	$r0, P3H
2607#else
2608	pushm	$r1, P3H
2609	move	$r0, P3H
2610	bal	__clzsi2
2611	popm	$r1, $r5
2612#endif
2613#else /* __big_endian__ */
2614#ifdef __NDS32_PERF_EXT__
2615	clz	$r1, $r4
2616#else
2617	push	$r0
2618	pushm	$r2, $r5
2619	move	$r0, $r4
2620	bal	__clzsi2
2621	move	$r1, $r0
2622	popm	$r2, $r5
2623	pop	$r0
2624#endif
2625#endif /* __big_endian__ */
2626	beqz    P1L, .LL40
2627	sub     $r6, $r6, P1L
2628	subri   P2H, P1L, #32
2629	srl     P2H, P3L, P2H
2630	sll     P3L, P3L, P1L
2631	sll     P3H, P3H, P1L
2632	or      P3H, P3H, P2H
2633.LL40:
2634	#NORMd End
2635
2636	j       .LFlab1
2637.Li16:
2638	subri   $r15, $r9, #0x7ff
2639	beqzs8  .LFnan
2640	j       .LFret
2641.Li15:
2642	or      $r10, P3H, P3L
2643	bnez    $r10, .LFnan
2644	bnez    $r9, .Li17
2645	slli    $r10, O1H, #1
2646	or      $r10, $r10, O1L
2647	beqz    $r10, .LFnan
2648.Li17:
2649	subri   $r15, $r9, #0x7ff
2650	bnezs8  .LFinf
2651
2652.LFspecB:
2653	#ADD(O1L, O1L)
2654	move    $r15, O1L
2655	add     O1L, O1L, O1L
2656	slt     $r15, O1L, $r15
2657
2658	#ADDC(O1H, O1H)
2659	add     O1H, O1H, O1H
2660	add     O1H, O1H, $r15
2661	bnez    $r9, .Li18
2662	or      $r10, O1H, O1L
2663	beqz    $r10, .Li19
2664
2665
2666	#NORMd($r7, P2L, P1L)
2667	bnez    O1H, .LL41
2668	bnez    O1L, .LL42
2669	move    $r9, #0
2670	j       .LL43
2671.LL42:
2672	move    O1H, O1L
2673	move    O1L, #0
2674	move    P2L, #32
2675	sub     $r9, $r9, P2L
2676.LL41:
2677#ifndef __big_endian__
2678#ifdef __NDS32_PERF_EXT__
2679	clz	$r2, $r8
2680#else
2681	pushm	$r0, $r1
2682	pushm	$r3, $r5
2683	move	$r0, $r8
2684	bal	__clzsi2
2685	move	$r2, $r0
2686	popm	$r3, $r5
2687	popm	$r0, $r1
2688#endif
2689#else /* __big_endian__ */
2690#ifdef __NDS32_PERF_EXT__
2691	clz	$r3, $r7
2692#else
2693	pushm	$r0, $r2
2694	pushm	$r4, $r5
2695	move	$r0, $r7
2696	bal	__clzsi2
2697	move	$r3, $r0
2698	popm	$r4, $r5
2699	popm	$r0, $r2
2700#endif
2701#endif /* __big_endian__ */
2702	beqz    P2L, .LL43
2703	sub     $r9, $r9, P2L
2704	subri   P1L, P2L, #32
2705	srl     P1L, O1L, P1L
2706	sll     O1L, O1L, P2L
2707	sll     O1H, O1H, P2L
2708	or      O1H, O1H, P1L
2709.LL43:
2710	#NORMd End
2711
2712	j       .LFlab2
2713.Li19:
2714	move    P1L, #0
2715	j       .LFret
2716.Li18:
2717	or      $r10, O1H, O1L
2718	bnez    $r10, .LFnan
2719
2720.LFinf:
2721	move    $r10, #0x7ff00000
2722	or      P1H, P1H, $r10
2723	move    P1L, #0
2724	j       .LFret
2725
2726.LFnan:
2727	move    P1H, #0xfff80000
2728	move    P1L, #0
2729	j       .LFret
2730
2731.LFoveund:
2732	bgtz    $r6, .LFinf
2733	subri   P1L, $r6, #1
2734	move    P2L, #0
2735.LL44:
2736	move    $r10, #0x20
2737	slt     $r15, P1L, $r10
2738	bnezs8  .LL45
2739	or      P2L, P2L, P3L
2740	move    P3L, P3H
2741	move    P3H, #0
2742	addi    P1L, P1L, #0xffffffe0
2743	bnez    P3L, .LL44
2744.LL45:
2745	beqz    P1L, .LL46
2746	move    P2H, P3H
2747	move    $r10, P3L
2748	srl     P3L, P3L, P1L
2749	srl     P3H, P3H, P1L
2750	subri   P1L, P1L, #0x20
2751	sll     P2H, P2H, P1L
2752	or      P3L, P3L, P2H
2753	sll     $r10, $r10, P1L
2754	or      P2L, P2L, $r10
2755	beqz    P2L, .LL46
2756	ori     P3L, P3L, #1
2757.LL46:
2758	#ADD(P3L, $0x400)
2759	move    $r15, #0x400
2760	add     P3L, P3L, $r15
2761	slt     $r15, P3L, $r15
2762
2763	#ADDC(P3H, $0x0)
2764	add     P3H, P3H, $r15
2765	srli    $r6, P3H, #31
2766	j       .LFlab8
2767	.size __muldf3, .-__muldf3
2768#endif /* L_mul_df */
2769
2770
2771
2772#ifdef L_div_sf
2773
2774	.text
2775	.align	2
2776	.global	__divsf3
2777	.type	__divsf3, @function
2778__divsf3:
2779	push    $lp
2780	pushm   $r6, $r10
2781
2782	move    $r7, #0x80000000
2783	srli    $r4, $r0, #23
2784	andi    $r4, $r4, #0xff
2785	srli    $r6, $r1, #23
2786	andi    $r6, $r6, #0xff
2787	slli    $r3, $r0, #8
2788	or      $r3, $r3, $r7
2789	slli    $r5, $r1, #8
2790	or      $r5, $r5, $r7
2791	xor     $r10, $r0, $r1
2792	and     $r7, $r7, $r10
2793
2794	addi    $r10, $r4, #-1
2795	slti    $r15, $r10, #0xfe
2796	beqzs8  .LGspecA
2797
2798.LGlab1:
2799	addi    $r10, $r6, #-1
2800	slti    $r15, $r10, #0xfe
2801	beqzs8  .LGspecB
2802
2803.LGlab2:
2804	slt     $r15, $r3, $r5
2805	bnezs8  .Li27
2806	srli    $r3, $r3, #1
2807	addi    $r4, $r4, #1
2808.Li27:
2809	srli    $r8, $r5, #14
2810	divr    $r0, $r2, $r3, $r8
2811	andi    $r9, $r5, #0x3fff
2812	mul     $r1, $r9, $r0
2813	slli    $r2, $r2, #14
2814
2815	#SUB($r2, $r1)
2816	move    $r15, $r2
2817	sub     $r2, $r2, $r1
2818	slt     $r15, $r15, $r2
2819	beqzs8  .Li28
2820	addi    $r0, $r0, #-1
2821
2822	#ADD($r2, $r5)
2823	add     $r2, $r2, $r5
2824	slt     $r15, $r2, $r5
2825.Li28:
2826	divr    $r3, $r2, $r2, $r8
2827	mul     $r1, $r9, $r3
2828	slli    $r2, $r2, #14
2829
2830	#SUB($r2, $r1)
2831	move    $r15, $r2
2832	sub     $r2, $r2, $r1
2833	slt     $r15, $r15, $r2
2834	beqzs8  .Li29
2835	addi    $r3, $r3, #-1
2836
2837	#ADD($r2, $r5)
2838	add     $r2, $r2, $r5
2839	slt     $r15, $r2, $r5
2840.Li29:
2841	slli    $r10, $r0, #14
2842	add     $r3, $r3, $r10
2843	slli    $r3, $r3, #4
2844	beqz    $r2, .Li30
2845	ori     $r3, $r3, #1
2846.Li30:
2847	subri   $r10, $r6, #0x7e
2848	add     $r4, $r4, $r10
2849	addi    $r10, $r4, #-1
2850	slti    $r15, $r10, #0xfe
2851	beqzs8  .LGoveund
2852
2853.LGlab8:
2854	#ADD($r3, $0x80)
2855	move    $r15, #0x80
2856	add     $r3, $r3, $r15
2857	slt     $r15, $r3, $r15
2858
2859	#ADDC($r4, $0x0)
2860	add     $r4, $r4, $r15
2861	srli    $r10, $r3, #8
2862	andi    $r10, $r10, #1
2863	sub     $r3, $r3, $r10
2864	slli    $r3, $r3, #1
2865	srli    $r3, $r3, #9
2866	slli    $r10, $r4, #23
2867	or      $r3, $r3, $r10
2868	or      $r0, $r3, $r7
2869
2870.LG999:
2871	popm    $r6, $r10
2872	pop     $lp
2873	ret5    $lp
2874
2875.LGspecA:
2876	bnez    $r4, .Li31
2877	add     $r3, $r3, $r3
2878	beqz    $r3, .Li31
2879#ifdef __NDS32_PERF_EXT__
2880	clz	$r8, $r3
2881#else
2882	pushm	$r0, $r5
2883	move	$r0, $r3
2884	bal	__clzsi2
2885	move	$r8, $r0
2886	popm	$r0, $r5
2887#endif
2888	sub     $r4, $r4, $r8
2889	sll     $r3, $r3, $r8
2890	j       .LGlab1
2891.Li31:
2892	bne     $r6, $r4, .Li33
2893	add     $r10, $r5, $r5
2894	beqz    $r10, .LGnan
2895.Li33:
2896	subri   $r15, $r6, #0xff
2897	beqzs8  .LGspecB
2898	beqz    $r4, .LGzer
2899	add     $r10, $r3, $r3
2900	bnez    $r10, .LGnan
2901	j       .LGinf
2902
2903.LGspecB:
2904	bnez    $r6, .Li34
2905	add     $r5, $r5, $r5
2906	beqz    $r5, .LGinf
2907#ifdef __NDS32_PERF_EXT__
2908	clz	$r8, $r5
2909#else
2910	pushm	$r0, $r5
2911	move	$r0, $r5
2912	bal	__clzsi2
2913	move	$r8, $r0
2914	popm	$r0, $r5
2915#endif
2916	sub     $r6, $r6, $r8
2917	sll     $r5, $r5, $r8
2918	j       .LGlab2
2919.Li34:
2920	add     $r10, $r5, $r5
2921	bnez    $r10, .LGnan
2922
2923.LGzer:
2924	move    $r0, $r7
2925	j       .LG999
2926
2927.LGoveund:
2928	bgtz    $r4, .LGinf
2929	subri   $r8, $r4, #1
2930	slti    $r15, $r8, #0x20
2931	beqzs8  .LGzer
2932	subri   $r10, $r8, #0x20
2933	sll     $r4, $r3, $r10
2934	srl     $r3, $r3, $r8
2935	beqz    $r4, .Li37
2936	ori     $r3, $r3, #2
2937.Li37:
2938	move    $r4, #0
2939	addi    $r10, $r3, #0x80
2940	sltsi   $r15, $r10, #0
2941	beqzs8  .LGlab8
2942	move    $r4, #1
2943	j       .LGlab8
2944
2945.LGinf:
2946	move    $r10, #0x7f800000
2947	or      $r0, $r7, $r10
2948	j       .LG999
2949
2950.LGnan:
2951	move    $r0, #0xffc00000
2952	j       .LG999
2953	.size	__divsf3, .-__divsf3
2954#endif /* L_div_sf */
2955
2956
2957
2958#ifdef L_div_df
2959
2960#ifndef __big_endian__
2961	#define P1L     $r0
2962	#define P1H     $r1
2963	#define P2L     $r2
2964	#define P2H     $r3
2965	#define P3L     $r4
2966	#define P3H     $r5
2967	#define O1L     $r7
2968	#define O1H	$r8
2969#else
2970	#define P1H     $r0
2971	#define P1L     $r1
2972	#define P2H     $r2
2973	#define P2L     $r3
2974	#define P3H     $r4
2975	#define P3L     $r5
2976	#define O1H     $r7
2977	#define O1L	$r8
2978#endif
2979	.text
2980	.align	2
2981	.global	__divdf3
2982	.type	__divdf3, @function
2983__divdf3:
2984	push    $lp
2985	pushm   $r6, $r10
2986
2987	slli    $r6, P1H, #1
2988	srli    $r6, $r6, #21
2989	slli    P3H, P1H, #11
2990	srli    $r10, P1L, #21
2991	or      P3H, P3H, $r10
2992	slli    P3L, P1L, #11
2993	move    O1L, #0x80000000
2994	or      P3H, P3H, O1L
2995	slli    $r9, P2H, #1
2996	srli    $r9, $r9, #21
2997	slli    O1H, P2H, #11
2998	srli    $r10, P2L, #21
2999	or      O1H, O1H, $r10
3000	or      O1H, O1H, O1L
3001	xor     P1H, P1H, P2H
3002	and     P1H, P1H, O1L
3003	slli    O1L, P2L, #11
3004
3005	addi    $r10, $r6, #-1
3006	slti    $r15, $r10, #0x7fe
3007	beqzs8  .LGspecA
3008
3009.LGlab1:
3010	addi    $r10, $r9, #-1
3011	slti    $r15, $r10, #0x7fe
3012	beqzs8  .LGspecB
3013
3014.LGlab2:
3015	sub     $r6, $r6, $r9
3016	addi    $r6, $r6, #0x3ff
3017	srli    P3L, P3L, #1
3018	slli    $r10, P3H, #31
3019	or      P3L, P3L, $r10
3020	srli    P3H, P3H, #1
3021	srli    $r9, O1H, #16
3022	divr    P2H, P3H, P3H, $r9
3023	move    $r10, #0xffff
3024	and     P2L, O1H, $r10
3025	mul     P1L, P2L, P2H
3026	slli    P3H, P3H, #16
3027	srli    $r10, P3L, #16
3028	or      P3H, P3H, $r10
3029
3030	#SUB(P3H, P1L)
3031	move    $r15, P3H
3032	sub     P3H, P3H, P1L
3033	slt     $r15, $r15, P3H
3034	beqzs8  .Li20
3035
3036.Lb21:
3037	addi    P2H, P2H, #-1
3038	add     P3H, P3H, O1H
3039	slt     $r15, P3H, O1H
3040	beqzs8  .Lb21
3041.Li20:
3042	divr    $r9, P3H, P3H, $r9
3043	mul     P1L, P2L, $r9
3044	slli    P3H, P3H, #16
3045	move    $r15, #0xffff
3046	and     $r10, P3L, $r15
3047	or      P3H, P3H, $r10
3048
3049	#SUB(P3H, P1L)
3050	move    $r15, P3H
3051	sub     P3H, P3H, P1L
3052	slt     $r15, $r15, P3H
3053	beqzs8  .Li22
3054
3055.Lb23:
3056	addi    $r9, $r9, #-1
3057	add     P3H, P3H, O1H
3058	slt     $r15, P3H, O1H
3059	beqzs8  .Lb23
3060.Li22:
3061	slli    P2H, P2H, #16
3062	add     P2H, P2H, $r9
3063
3064/* This is a 64-bit multiple. */
3065#ifndef __big_endian__
3066/* For little endian: ($r0, $r9) is (high, low). */
3067	move    $r10, $r1
3068#ifndef __NDS32_ISA_V3M__
3069	mulr64	$r0, $r3, $r7
3070#else
3071	pushm	$r2, $r5
3072	move	$r0, $r3
3073	movi	$r1, #0
3074	move	$r2, $r7
3075	movi	$r3, #0
3076	bal	__muldi3
3077	popm	$r2, $r5
3078#endif
3079	move    $r9, $r0
3080	move    $r0, $r1
3081	move    $r1, $r10
3082#else /* __big_endian__ */
3083/* For big endian: ($r1, $r9) is (high, low). */
3084	move    $r10, $r0
3085#ifndef __NDS32_ISA_V3M__
3086	mulr64	$r0, $r2, $r8
3087#else
3088	pushm	$r2, $r5
3089	move	$r1, $r2
3090	movi	$r0, #0
3091	move	$r3, $r8
3092	movi	$r2, #0
3093	bal	__muldi3
3094	popm	$r2, $r5
3095#endif
3096	move    $r9, $r1
3097	move    $r1, $r0
3098	move    $r0, $r10
3099#endif /* __big_endian__ */
3100
3101	move    P3L, #0
3102
3103	#SUB(P3L, $r9)
3104	move    $r15, P3L
3105	sub     P3L, P3L, $r9
3106	slt     $r15, $r15, P3L
3107
3108
3109	#SUBCC(P3H, P1L)
3110	beqzs8  .LL47
3111	move    $r15, P3H
3112	sub     P3H, P3H, P1L
3113	slt     $r15, $r15, P3H
3114	beqzs8  .LL48
3115	subi333 P3H, P3H, #1
3116	j       .LL49
3117.LL48:
3118	move    $r15, P3H
3119	subi333 P3H, P3H, #1
3120	slt     $r15, $r15, P3H
3121	j       .LL49
3122.LL47:
3123	move    $r15, P3H
3124	sub     P3H, P3H, P1L
3125	slt     $r15, $r15, P3H
3126.LL49:
3127
3128	beqzs8  .Li24
3129
3130.LGlab3:
3131	addi    P2H, P2H, #-1
3132
3133	#ADD(P3L, O1L)
3134	add     P3L, P3L, O1L
3135	slt     $r15, P3L, O1L
3136
3137
3138	#ADDCC(P3H, O1H)
3139	beqzs8  .LL50
3140	add     P3H, P3H, O1H
3141	slt     $r15, P3H, O1H
3142	beqzs8  .LL51
3143	addi    P3H, P3H, #0x1
3144	j       .LL52
3145.LL51:
3146	move    $r15, #1
3147	add     P3H, P3H, $r15
3148	slt     $r15, P3H, $r15
3149	j       .LL52
3150.LL50:
3151	add     P3H, P3H, O1H
3152	slt     $r15, P3H, O1H
3153.LL52:
3154
3155	beqzs8  .LGlab3
3156.Li24:
3157	bne     P3H, O1H, .Li25
3158	move    P1L, O1L
3159	move    P3H, P3L
3160	move    $r9, #0
3161	move    P2L, $r9
3162	j       .Le25
3163.Li25:
3164	srli    P2L, O1H, #16
3165	divr    $r9, P3H, P3H, P2L
3166	move    $r10, #0xffff
3167	and     $r10, O1H, $r10
3168	mul     P1L, $r10, $r9
3169	slli    P3H, P3H, #16
3170	srli    $r15, P3L, #16
3171	or      P3H, P3H, $r15
3172
3173	#SUB(P3H, P1L)
3174	move    $r15, P3H
3175	sub     P3H, P3H, P1L
3176	slt     $r15, $r15, P3H
3177	beqzs8  .Li26
3178
3179.Lb27:
3180	addi    $r9, $r9, #-1
3181	add     P3H, P3H, O1H
3182	slt     $r15, P3H, O1H
3183	beqzs8  .Lb27
3184.Li26:
3185	divr    P2L, P3H, P3H, P2L
3186	mul     P1L, $r10, P2L
3187	slli    P3H, P3H, #16
3188	move    $r10, #0xffff
3189	and     $r10, P3L, $r10
3190	or      P3H, P3H, $r10
3191
3192	#SUB(P3H, P1L)
3193	move    $r15, P3H
3194	sub     P3H, P3H, P1L
3195	slt     $r15, $r15, P3H
3196	beqzs8  .Li28
3197
3198.Lb29:
3199	addi    P2L, P2L, #-1
3200	add     P3H, P3H, O1H
3201	slt     $r15, P3H, O1H
3202	beqzs8  .Lb29
3203.Li28:
3204	slli    $r9, $r9, #16
3205	add     $r9, $r9, P2L
3206
3207/* This is a 64-bit multiple. */
3208#ifndef __big_endian__
3209/* For little endian: ($r0, $r2) is (high, low). */
3210	move    $r10, $r1
3211#ifndef __NDS32_ISA_V3M__
3212	mulr64	$r0, $r9, $r7
3213#else
3214	pushm	$r2, $r5
3215	move	$r0, $r9
3216	movi	$r1, #0
3217	move	$r2, $r7
3218	movi	$r3, #0
3219	bal	__muldi3
3220	popm	$r2, $r5
3221#endif
3222	move    $r2, $r0
3223	move    $r0, $r1
3224	move    $r1, $r10
3225#else /* __big_endian__ */
3226/* For big endian: ($r1, $r3) is (high, low). */
3227	move	$r10, $r0
3228#ifndef __NDS32_ISA_V3M__
3229	mulr64	$r0, $r9, $r8
3230#else
3231	pushm	$r2, $r5
3232	move	$r0, $r9
3233	movi	$r1, #0
3234	move	$r2, $r7
3235	movi	$r3, #0
3236	bal	__muldi3
3237	popm	$r2, $r5
3238#endif
3239	move	$r3, $r1
3240	move	$r1, $r0
3241	move	$r0, $r10
3242#endif /* __big_endian__ */
3243
3244.Le25:
3245	move    P3L, #0
3246
3247	#SUB(P3L, P2L)
3248	move    $r15, P3L
3249	sub     P3L, P3L, P2L
3250	slt     $r15, $r15, P3L
3251
3252
3253	#SUBCC(P3H, P1L)
3254	beqzs8  .LL53
3255	move    $r15, P3H
3256	sub     P3H, P3H, P1L
3257	slt     $r15, $r15, P3H
3258	beqzs8  .LL54
3259	subi333 P3H, P3H, #1
3260	j       .LL55
3261.LL54:
3262	move    $r15, P3H
3263	subi333 P3H, P3H, #1
3264	slt     $r15, $r15, P3H
3265	j       .LL55
3266.LL53:
3267	move    $r15, P3H
3268	sub     P3H, P3H, P1L
3269	slt     $r15, $r15, P3H
3270.LL55:
3271
3272	beqzs8  .Li30
3273
3274.LGlab4:
3275	addi    $r9, $r9, #-1
3276
3277	#ADD(P3L, O1L)
3278	add     P3L, P3L, O1L
3279	slt     $r15, P3L, O1L
3280
3281
3282	#ADDCC(P3H, O1H)
3283	beqzs8  .LL56
3284	add     P3H, P3H, O1H
3285	slt     $r15, P3H, O1H
3286	beqzs8  .LL57
3287	addi    P3H, P3H, #0x1
3288	j       .LL58
3289.LL57:
3290	move    $r15, #1
3291	add     P3H, P3H, $r15
3292	slt     $r15, P3H, $r15
3293	j       .LL58
3294.LL56:
3295	add     P3H, P3H, O1H
3296	slt     $r15, P3H, O1H
3297.LL58:
3298
3299	beqzs8  .LGlab4
3300.Li30:
3301	sltsi   $r15, P2H, #0
3302	bnezs8  .Li31
3303
3304	#ADD($r9, $r9)
3305	move    $r15, $r9
3306	add     $r9, $r9, $r9
3307	slt     $r15, $r9, $r15
3308
3309	#ADDC(P2H, P2H)
3310	add     P2H, P2H, P2H
3311	add     P2H, P2H, $r15
3312	addi    $r6, $r6, #-1
3313.Li31:
3314	or      $r10, P3H, P3L
3315	beqz    $r10, .Li32
3316	ori     $r9, $r9, #1
3317.Li32:
3318	move    P3H, P2H
3319	move    P3L, $r9
3320	addi    $r10, $r6, #-1
3321	slti    $r15, $r10, #0x7fe
3322	beqzs8  .LGoveund
3323
3324	#ADD(P3L, $0x400)
3325	move    $r15, #0x400
3326	add     P3L, P3L, $r15
3327	slt     $r15, P3L, $r15
3328
3329
3330	#ADDCC(P3H, $0x0)
3331	beqzs8  .LL61
3332	add     P3H, P3H, $r15
3333	slt     $r15, P3H, $r15
3334.LL61:
3335
3336	#ADDC($r6, $0x0)
3337	add     $r6, $r6, $r15
3338
3339.LGlab8:
3340	srli    $r10, P3L, #11
3341	andi    $r10, $r10, #1
3342	sub     P3L, P3L, $r10
3343	srli    P1L, P3L, #11
3344	slli    $r10, P3H, #21
3345	or      P1L, P1L, $r10
3346	slli    $r10, P3H, #1
3347	srli    $r10, $r10, #12
3348	or      P1H, P1H, $r10
3349	slli    $r10, $r6, #20
3350	or      P1H, P1H, $r10
3351
3352.LGret:
3353.LG999:
3354	popm    $r6, $r10
3355	pop     $lp
3356	ret5    $lp
3357
3358.LGoveund:
3359	bgtz    $r6, .LGinf
3360	subri   P2H, $r6, #1
3361	move    P1L, #0
3362.LL62:
3363	move    $r10, #0x20
3364	slt     $r15, P2H, $r10
3365	bnezs8  .LL63
3366	or      P1L, P1L, P3L
3367	move    P3L, P3H
3368	move    P3H, #0
3369	addi    P2H, P2H, #0xffffffe0
3370	bnez    P3L, .LL62
3371.LL63:
3372	beqz    P2H, .LL64
3373	move    P2L, P3H
3374	move    $r10, P3L
3375	srl     P3L, P3L, P2H
3376	srl     P3H, P3H, P2H
3377	subri   P2H, P2H, #0x20
3378	sll     P2L, P2L, P2H
3379	or      P3L, P3L, P2L
3380	sll     $r10, $r10, P2H
3381	or      P1L, P1L, $r10
3382	beqz    P1L, .LL64
3383	ori     P3L, P3L, #1
3384.LL64:
3385	#ADD(P3L, $0x400)
3386	move    $r15, #0x400
3387	add     P3L, P3L, $r15
3388	slt     $r15, P3L, $r15
3389
3390	#ADDC(P3H, $0x0)
3391	add     P3H, P3H, $r15
3392	srli    $r6, P3H, #31
3393	j       .LGlab8
3394
3395.LGspecA:
3396	#ADD(P3L, P3L)
3397	move    $r15, P3L
3398	add     P3L, P3L, P3L
3399	slt     $r15, P3L, $r15
3400
3401	#ADDC(P3H, P3H)
3402	add     P3H, P3H, P3H
3403	add     P3H, P3H, $r15
3404	bnez    $r6, .Li33
3405	or      $r10, P3H, P3L
3406	beqz    $r10, .Li33
3407
3408
3409	#NORMd($r4, P2H, P2L)
3410	bnez    P3H, .LL65
3411	bnez    P3L, .LL66
3412	move    $r6, #0
3413	j       .LL67
3414.LL66:
3415	move    P3H, P3L
3416	move    P3L, #0
3417	move    P2H, #32
3418	sub     $r6, $r6, P2H
3419.LL65:
3420#ifndef __big_endian__
3421#ifdef __NDS32_PERF_EXT__
3422	clz	$r3, $r5
3423#else
3424	pushm	$r0, $r2
3425	pushm	$r4, $r5
3426	move	$r0, $r5
3427	bal	__clzsi2
3428	move	$r3, $r0
3429	popm	$r4, $r5
3430	popm	$r0, $r2
3431#endif
3432#else /* __big_endian__ */
3433#ifdef __NDS32_PERF_EXT__
3434	clz	$r2, $r4
3435#else
3436	pushm	$r0, $r1
3437	pushm	$r3, $r5
3438	move	$r0, $r4
3439	bal	__clzsi2
3440	move	$r2, $r0
3441	popm	$r3, $r5
3442	popm	$r0, $r1
3443#endif
3444#endif /* __big_endian_ */
3445	beqz    P2H, .LL67
3446	sub     $r6, $r6, P2H
3447	subri   P2L, P2H, #32
3448	srl     P2L, P3L, P2L
3449	sll     P3L, P3L, P2H
3450	sll     P3H, P3H, P2H
3451	or      P3H, P3H, P2L
3452.LL67:
3453	#NORMd End
3454
3455	j       .LGlab1
3456.Li33:
3457	bne     $r6, $r9, .Li35
3458	slli    $r10, O1H, #1
3459	or      $r10, $r10, O1L
3460	beqz    $r10, .LGnan
3461.Li35:
3462	subri   $r15, $r9, #0x7ff
3463	beqzs8  .LGspecB
3464	beqz    $r6, .LGret
3465	or      $r10, P3H, P3L
3466	bnez    $r10, .LGnan
3467
3468.LGinf:
3469	move    $r10, #0x7ff00000
3470	or      P1H, P1H, $r10
3471	move    P1L, #0
3472	j       .LGret
3473
3474.LGspecB:
3475	#ADD(O1L, O1L)
3476	move    $r15, O1L
3477	add     O1L, O1L, O1L
3478	slt     $r15, O1L, $r15
3479
3480	#ADDC(O1H, O1H)
3481	add     O1H, O1H, O1H
3482	add     O1H, O1H, $r15
3483	bnez    $r9, .Li36
3484	or      $r10, O1H, O1L
3485	beqz    $r10, .LGinf
3486
3487
3488	#NORMd($r7, P2H, P2L)
3489	bnez    O1H, .LL68
3490	bnez    O1L, .LL69
3491	move    $r9, #0
3492	j       .LL70
3493.LL69:
3494	move    O1H, O1L
3495	move    O1L, #0
3496	move    P2H, #32
3497	sub     $r9, $r9, P2H
3498.LL68:
3499#ifndef __big_endian__
3500#ifdef __NDS32_PERF_EXT__
3501	clz	$r3, $r8
3502#else
3503	pushm	$r0, $r2
3504	pushm	$r4, $r5
3505	move	$r0, $r8
3506	bal	__clzsi2
3507	move	$r3, $r0
3508	popm	$r4, $r5
3509	popm	$r0, $r2
3510#endif
3511#else /* __big_endian__ */
3512#ifdef __NDS32_PERF_EXT__
3513	clz	$r2, $r7
3514#else
3515	pushm	$r0, $r1
3516	pushm	$r3, $r5
3517	move	$r0, $r7
3518	bal	__clzsi2
3519	move	$r2, $r0
3520	popm	$r3, $r5
3521	popm	$r0, $r1
3522#endif
3523#endif /* __big_endian__ */
3524	beqz    P2H, .LL70
3525	sub     $r9, $r9, P2H
3526	subri   P2L, P2H, #32
3527	srl     P2L, O1L, P2L
3528	sll     O1L, O1L, P2H
3529	sll     O1H, O1H, P2H
3530	or      O1H, O1H, P2L
3531.LL70:
3532	#NORMd End
3533
3534	j       .LGlab2
3535.Li36:
3536	or      $r10, O1H, O1L
3537	beqz    $r10, .Li38
3538
3539.LGnan:
3540	move    P1H, #0xfff80000
3541.Li38:
3542	move    P1L, #0
3543	j       .LGret
3544	.size __divdf3, .-__divdf3
3545#endif /* L_div_df */
3546
3547
3548
3549#ifdef L_negate_sf
3550
3551	.text
3552	.align	2
3553	.global	__negsf2
3554	.type	__negsf2, @function
3555__negsf2:
3556	push    $lp
3557
3558	move    $r1, #0x80000000
3559	xor     $r0, $r0, $r1
3560
3561.LN999:
3562	pop     $lp
3563	ret5    $lp
3564	.size __negsf2, .-__negsf2
3565#endif /* L_negate_sf */
3566
3567
3568
3569#ifdef L_negate_df
3570
3571#ifndef __big_endian__
3572	#define P1H     $r1
3573#else
3574	#define P1H     $r0
3575#endif
3576	.text
3577	.align	2
3578	.global	__negdf2
3579	.type	__negdf2, @function
3580__negdf2:
3581	push    $lp
3582
3583	move    $r2, #0x80000000
3584	xor     P1H, P1H, $r2
3585
3586.LP999:
3587	pop     $lp
3588	ret5    $lp
3589	.size __negdf2, .-__negdf2
3590#endif /* L_negate_df */
3591
3592
3593
3594#ifdef L_sf_to_df
3595
3596#ifndef __big_endian__
3597	#define O1L     $r1
3598	#define O1H     $r2
3599#else
3600	#define O1H     $r1
3601	#define O1L     $r2
3602#endif
3603	.text
3604	.align	2
3605	.global	__extendsfdf2
3606	.type	__extendsfdf2, @function
3607__extendsfdf2:
3608	push    $lp
3609
3610	srli    $r3, $r0, #23
3611	andi    $r3, $r3, #0xff
3612	move    $r5, #0x80000000
3613	and     O1H, $r0, $r5
3614	addi    $r5, $r3, #-1
3615	slti    $r15, $r5, #0xfe
3616	beqzs8  .LJspec
3617
3618.LJlab1:
3619	addi    $r3, $r3, #0x380
3620	slli    $r5, $r0, #9
3621	srli    $r5, $r5, #12
3622	or      O1H, O1H, $r5
3623	slli    O1L, $r0, #29
3624
3625.LJret:
3626	slli    $r5, $r3, #20
3627	or      O1H, O1H, $r5
3628	move    $r0, $r1
3629	move    $r1, $r2
3630
3631.LJ999:
3632	pop     $lp
3633	ret5    $lp
3634
3635.LJspec:
3636	move    O1L, #0
3637	add     $r0, $r0, $r0
3638	beqz    $r0, .LJret
3639	bnez    $r3, .Li42
3640
3641.Lb43:
3642	addi    $r3, $r3, #-1
3643	add     $r0, $r0, $r0
3644	move    $r5, #0x800000
3645	slt     $r15, $r0, $r5
3646	bnezs8  .Lb43
3647	j       .LJlab1
3648.Li42:
3649	move    $r3, #0x7ff
3650	move    $r5, #0xff000000
3651	slt     $r15, $r5, $r0
3652	beqzs8  .LJret
3653	move    O1H, #0xfff80000
3654	j       .LJret
3655	.size __extendsfdf2, .-__extendsfdf2
3656#endif /* L_sf_to_df */
3657
3658
3659
3660#ifdef L_df_to_sf
3661
3662#ifndef __big_endian__
3663	#define P1L     $r0
3664	#define P1H     $r1
3665	#define P2L     $r2
3666	#define P2H     $r3
3667#else
3668	#define P1H     $r0
3669	#define P1L     $r1
3670	#define P2H     $r2
3671	#define P2L     $r3
3672#endif
3673	.text
3674	.align	2
3675	.global	__truncdfsf2
3676	.type	__truncdfsf2, @function
3677__truncdfsf2:
3678	push    $lp
3679	pushm   $r6, $r8
3680
3681	slli    P2H, P1H, #11
3682	srli    $r7, P1L, #21
3683	or      P2H, P2H, $r7
3684	slli    P2L, P1L, #11
3685	move    $r7, #0x80000000
3686	or      P2H, P2H, $r7
3687	and     $r5, P1H, $r7
3688	slli    $r4, P1H, #1
3689	srli    $r4, $r4, #21
3690	addi    $r4, $r4, #0xfffffc80
3691	addi    $r7, $r4, #-1
3692	slti    $r15, $r7, #0xfe
3693	beqzs8  .LKspec
3694
3695.LKlab1:
3696	beqz    P2L, .Li45
3697	ori     P2H, P2H, #1
3698.Li45:
3699	#ADD(P2H, $0x80)
3700	move    $r15, #0x80
3701	add     P2H, P2H, $r15
3702	slt     $r15, P2H, $r15
3703
3704	#ADDC($r4, $0x0)
3705	add     $r4, $r4, $r15
3706	srli    $r7, P2H, #8
3707	andi    $r7, $r7, #1
3708	sub     P2H, P2H, $r7
3709	slli    P2H, P2H, #1
3710	srli    P2H, P2H, #9
3711	slli    $r7, $r4, #23
3712	or      P2H, P2H, $r7
3713	or      $r0, P2H, $r5
3714
3715.LK999:
3716	popm    $r6, $r8
3717	pop     $lp
3718	ret5    $lp
3719
3720.LKspec:
3721	subri   $r15, $r4, #0x47f
3722	bnezs8  .Li46
3723	slli    $r7, P2H, #1
3724	or      $r7, $r7, P2L
3725	beqz    $r7, .Li46
3726	move    $r0, #0xffc00000
3727	j       .LK999
3728.Li46:
3729	sltsi   $r15, $r4, #0xff
3730	bnezs8  .Li48
3731	move    $r7, #0x7f800000
3732	or      $r0, $r5, $r7
3733	j       .LK999
3734.Li48:
3735	subri   $r6, $r4, #1
3736	move    $r7, #0x20
3737	slt     $r15, $r6, $r7
3738	bnezs8  .Li49
3739	move    $r0, $r5
3740	j       .LK999
3741.Li49:
3742	subri   $r8, $r6, #0x20
3743	sll     $r7, P2H, $r8
3744	or      P2L, P2L, $r7
3745	srl     P2H, P2H, $r6
3746	move    $r4, #0
3747	move    $r7, #0x80000000
3748	or      P2H, P2H, $r7
3749	j       .LKlab1
3750	.size __truncdfsf2, .-__truncdfsf2
3751#endif /* L_df_to_sf */
3752
3753
3754
3755#ifdef L_df_to_si
3756
3757#ifndef __big_endian__
3758	#define P1L     $r0
3759	#define P1H     $r1
3760#else
3761	#define P1H     $r0
3762	#define P1L     $r1
3763#endif
3764	.global	__fixdfsi
3765	.type	__fixdfsi, @function
3766__fixdfsi:
3767	push    $lp
3768	pushm   $r6, $r6
3769
3770	slli    $r3, P1H, #11
3771	srli    $r6, P1L, #21
3772	or      $r3, $r3, $r6
3773	move    $r6, #0x80000000
3774	or      $r3, $r3, $r6
3775	slli    $r6, P1H, #1
3776	srli    $r6, $r6, #21
3777	subri   $r2, $r6, #0x41e
3778	blez    $r2, .LLnaninf
3779	move    $r6, #0x20
3780	slt     $r15, $r2, $r6
3781	bnezs8  .LL72
3782	move    $r3, #0
3783.LL72:
3784	srl     $r3, $r3, $r2
3785	sltsi   $r15, P1H, #0
3786	beqzs8  .Li50
3787	subri   $r3, $r3, #0
3788.Li50:
3789	move    $r0, $r3
3790
3791.LL999:
3792	popm    $r6, $r6
3793	pop     $lp
3794	ret5    $lp
3795
3796.LLnaninf:
3797	beqz    P1L, .Li51
3798	ori     P1H, P1H, #1
3799.Li51:
3800	move    $r6, #0x7ff00000
3801	slt     $r15, $r6, P1H
3802	beqzs8  .Li52
3803	move    $r0, #0x80000000
3804	j       .LL999
3805.Li52:
3806	move    $r0, #0x7fffffff
3807	j       .LL999
3808	.size __fixdfsi, .-__fixdfsi
3809#endif /* L_df_to_si */
3810
3811
3812
3813#ifdef L_fixsfdi
3814
3815#ifndef __big_endian__
3816	#define O1L     $r1
3817	#define O1H     $r2
3818#else
3819	#define O1H     $r1
3820	#define O1L     $r2
3821#endif
3822	.text
3823	.align	2
3824	.global	__fixsfdi
3825	.type	__fixsfdi, @function
3826__fixsfdi:
3827	push    $lp
3828
3829	srli    $r3, $r0, #23
3830	andi    $r3, $r3, #0xff
3831	slli    O1H, $r0, #8
3832	move    $r5, #0x80000000
3833	or      O1H, O1H, $r5
3834	move    O1L, #0
3835	sltsi   $r15, $r3, #0xbe
3836	beqzs8  .LCinfnan
3837	subri   $r3, $r3, #0xbe
3838.LL8:
3839	move    $r5, #0x20
3840	slt     $r15, $r3, $r5
3841	bnezs8  .LL9
3842	move    O1L, O1H
3843	move    O1H, #0
3844	addi    $r3, $r3, #0xffffffe0
3845	bnez    O1L, .LL8
3846.LL9:
3847	beqz    $r3, .LL10
3848	move    $r4, O1H
3849	srl     O1L, O1L, $r3
3850	srl     O1H, O1H, $r3
3851	subri   $r3, $r3, #0x20
3852	sll     $r4, $r4, $r3
3853	or      O1L, O1L, $r4
3854.LL10:
3855	sltsi   $r15, $r0, #0
3856	beqzs8  .LCret
3857
3858	subri   O1H, O1H, #0
3859	beqz    O1L, .LL11
3860	subri   O1L, O1L, #0
3861	subi45  O1H, #1
3862.LL11:
3863
3864.LCret:
3865	move    $r0, $r1
3866	move    $r1, $r2
3867
3868.LC999:
3869	pop     $lp
3870	ret5    $lp
3871
3872.LCinfnan:
3873	sltsi   $r15, $r0, #0
3874	bnezs8  .LCret3
3875	subri   $r15, $r3, #0xff
3876	bnezs8  .Li7
3877	slli    $r5, O1H, #1
3878	beqz    $r5, .Li7
3879
3880.LCret3:
3881	move    O1H, #0x80000000
3882	j       .LCret
3883.Li7:
3884	move    O1H, #0x7fffffff
3885	move    O1L, #-1
3886	j       .LCret
3887	.size	__fixsfdi, .-__fixsfdi
3888#endif /* L_fixsfdi */
3889
3890
3891
3892#ifdef L_fixdfdi
3893
3894#ifndef __big_endian__
3895	#define P1L     $r0
3896	#define P1H     $r1
3897	#define O1L     $r3
3898	#define O1H     $r4
3899#else
3900	#define P1H     $r0
3901	#define P1L     $r1
3902	#define O1H     $r3
3903	#define O1L     $r4
3904#endif
3905	.text
3906	.align	2
3907	.global	__fixdfdi
3908	.type	__fixdfdi, @function
3909__fixdfdi:
3910	push    $lp
3911	pushm   $r6, $r6
3912
3913	slli    $r5, P1H, #1
3914	srli    $r5, $r5, #21
3915	slli    O1H, P1H, #11
3916	srli    $r6, P1L, #21
3917	or      O1H, O1H, $r6
3918	slli    O1L, P1L, #11
3919	move    $r6, #0x80000000
3920	or      O1H, O1H, $r6
3921	slti    $r15, $r5, #0x43e
3922	beqzs8  .LCnaninf
3923	subri   $r2, $r5, #0x43e
3924.LL14:
3925	move    $r6, #0x20
3926	slt     $r15, $r2, $r6
3927	bnezs8  .LL15
3928	move    O1L, O1H
3929	move    O1H, #0
3930	addi    $r2, $r2, #0xffffffe0
3931	bnez    O1L, .LL14
3932.LL15:
3933	beqz    $r2, .LL16
3934	move    P1L, O1H
3935	srl     O1L, O1L, $r2
3936	srl     O1H, O1H, $r2
3937	subri   $r2, $r2, #0x20
3938	sll     P1L, P1L, $r2
3939	or      O1L, O1L, P1L
3940.LL16:
3941	sltsi   $r15, P1H, #0
3942	beqzs8  .LCret
3943
3944	subri   O1H, O1H, #0
3945	beqz    O1L, .LL17
3946	subri   O1L, O1L, #0
3947	subi45  O1H, #1
3948.LL17:
3949
3950.LCret:
3951	move    P1L, O1L
3952	move    P1H, O1H
3953
3954.LC999:
3955	popm    $r6, $r6
3956	pop     $lp
3957	ret5    $lp
3958
3959.LCnaninf:
3960	sltsi   $r15, P1H, #0
3961	bnezs8  .LCret3
3962	subri   $r15, $r5, #0x7ff
3963	bnezs8  .Li5
3964	slli    $r6, O1H, #1
3965	or      $r6, $r6, O1L
3966	beqz    $r6, .Li5
3967
3968.LCret3:
3969	move    O1H, #0x80000000
3970	move    O1L, #0
3971	j       .LCret
3972.Li5:
3973	move    O1H, #0x7fffffff
3974	move    O1L, #-1
3975	j       .LCret
3976	.size	__fixdfdi, .-__fixdfdi
3977#endif /* L_fixdfdi */
3978
3979
3980
3981#ifdef L_fixunssfsi
3982
3983	.global	__fixunssfsi
3984	.type	__fixunssfsi, @function
3985__fixunssfsi:
3986	push    $lp
3987
3988	slli    $r1, $r0, #8
3989	move    $r3, #0x80000000
3990	or      $r1, $r1, $r3
3991	srli    $r3, $r0, #23
3992	andi    $r3, $r3, #0xff
3993	subri   $r2, $r3, #0x9e
3994	sltsi   $r15, $r2, #0
3995	bnezs8  .LLspec
3996	sltsi   $r15, $r2, #0x20
3997	bnezs8  .Li45
3998	move    $r0, #0
3999	j       .LL999
4000.Li45:
4001	srl     $r1, $r1, $r2
4002	sltsi   $r15, $r0, #0
4003	beqzs8  .Li46
4004	subri   $r1, $r1, #0
4005.Li46:
4006	move    $r0, $r1
4007
4008.LL999:
4009	pop     $lp
4010	ret5    $lp
4011
4012.LLspec:
4013	move    $r3, #0x7f800000
4014	slt     $r15, $r3, $r0
4015	beqzs8  .Li47
4016	move    $r0, #0x80000000
4017	j       .LL999
4018.Li47:
4019	move    $r0, #-1
4020	j       .LL999
4021	.size	__fixunssfsi, .-__fixunssfsi
4022#endif /* L_fixunssfsi */
4023
4024
4025
4026#ifdef L_fixunsdfsi
4027
4028#ifndef __big_endian__
4029	#define P1L     $r0
4030	#define P1H     $r1
4031#else
4032	#define P1H     $r0
4033	#define P1L     $r1
4034#endif
4035	.text
4036	.align	2
4037	.global	__fixunsdfsi
4038	.type	__fixunsdfsi, @function
4039__fixunsdfsi:
4040	push    $lp
4041	pushm   $r6, $r6
4042
4043	slli    $r3, P1H, #11
4044	srli    $r6, P1L, #21
4045	or      $r3, $r3, $r6
4046	move    $r6, #0x80000000
4047	or      $r3, $r3, $r6
4048	slli    $r6, P1H, #1
4049	srli    $r6, $r6, #21
4050	subri   $r2, $r6, #0x41e
4051	sltsi   $r15, $r2, #0
4052	bnezs8  .LNnaninf
4053	move    $r6, #0x20
4054	slt     $r15, $r2, $r6
4055	bnezs8  .LL73
4056	move    $r3, #0
4057.LL73:
4058	srl     $r3, $r3, $r2
4059	sltsi   $r15, P1H, #0
4060	beqzs8  .Li53
4061	subri   $r3, $r3, #0
4062.Li53:
4063	move    $r0, $r3
4064
4065.LN999:
4066	popm    $r6, $r6
4067	pop     $lp
4068	ret5    $lp
4069
4070.LNnaninf:
4071	beqz    P1L, .Li54
4072	ori     P1H, P1H, #1
4073.Li54:
4074	move    $r6, #0x7ff00000
4075	slt     $r15, $r6, P1H
4076	beqzs8  .Li55
4077	move    $r0, #0x80000000
4078	j       .LN999
4079.Li55:
4080	move    $r0, #-1
4081	j       .LN999
4082	.size __fixunsdfsi, .-__fixunsdfsi
4083#endif /* L_fixunsdfsi */
4084
4085
4086
4087#ifdef L_fixunssfdi
4088
4089#ifndef __big_endian__
4090	#define O1L     $r1
4091	#define O1H     $r2
4092#else
4093	#define O1H     $r1
4094	#define O1L     $r2
4095#endif
4096	.text
4097	.align	2
4098	.global	__fixunssfdi
4099	.type	__fixunssfdi, @function
4100__fixunssfdi:
4101	push    $lp
4102
4103	srli    $r3, $r0, #23
4104	andi    $r3, $r3, #0xff
4105	slli    O1H, $r0, #8
4106	move    $r5, #0x80000000
4107	or      O1H, O1H, $r5
4108	move    O1L, #0
4109	sltsi   $r15, $r3, #0xbe
4110	beqzs8  .LDinfnan
4111	subri   $r3, $r3, #0xbe
4112.LL12:
4113	move    $r5, #0x20
4114	slt     $r15, $r3, $r5
4115	bnezs8  .LL13
4116	move    O1L, O1H
4117	move    O1H, #0
4118	addi    $r3, $r3, #0xffffffe0
4119	bnez    O1L, .LL12
4120.LL13:
4121	beqz    $r3, .LL14
4122	move    $r4, O1H
4123	srl     O1L, O1L, $r3
4124	srl     O1H, O1H, $r3
4125	subri   $r3, $r3, #0x20
4126	sll     $r4, $r4, $r3
4127	or      O1L, O1L, $r4
4128.LL14:
4129	sltsi   $r15, $r0, #0
4130	beqzs8  .LDret
4131
4132	subri   O1H, O1H, #0
4133	beqz    O1L, .LL15
4134	subri   O1L, O1L, #0
4135	subi45  O1H, #1
4136.LL15:
4137
4138.LDret:
4139	move    $r0, $r1
4140	move    $r1, $r2
4141
4142.LD999:
4143	pop     $lp
4144	ret5    $lp
4145
4146.LDinfnan:
4147	move    O1H, #0x80000000
4148	move    O1L, #0
4149	j       .LDret
4150	.size	__fixunssfdi, .-__fixunssfdi
4151#endif /* L_fixunssfdi */
4152
4153
4154
4155#ifdef L_fixunsdfdi
4156
4157#ifndef __big_endian__
4158	#define P1L     $r0
4159	#define P1H     $r1
4160	#define O1L     $r3
4161	#define O1H     $r4
4162#else
4163	#define P1H     $r0
4164	#define P1L     $r1
4165	#define O1H     $r3
4166	#define O1L     $r4
4167#endif
4168	.text
4169	.align	2
4170	.global	__fixunsdfdi
4171	.type	__fixunsdfdi, @function
4172__fixunsdfdi:
4173	push    $lp
4174	pushm   $r6, $r6
4175
4176	slli    $r5, P1H, #1
4177	srli    $r5, $r5, #21
4178	slli    O1H, P1H, #11
4179	srli    $r6, P1L, #21
4180	or      O1H, O1H, $r6
4181	slli    O1L, P1L, #11
4182	move    $r6, #0x80000000
4183	or      O1H, O1H, $r6
4184	slti    $r15, $r5, #0x43e
4185	beqzs8  .LDnaninf
4186	subri   $r2, $r5, #0x43e
4187.LL18:
4188	move    $r6, #0x20
4189	slt     $r15, $r2, $r6
4190	bnezs8  .LL19
4191	move    O1L, O1H
4192	move    O1H, #0
4193	addi    $r2, $r2, #0xffffffe0
4194	bnez    O1L, .LL18
4195.LL19:
4196	beqz    $r2, .LL20
4197	move    P1L, O1H
4198	srl     O1L, O1L, $r2
4199	srl     O1H, O1H, $r2
4200	subri   $r2, $r2, #0x20
4201	sll     P1L, P1L, $r2
4202	or      O1L, O1L, P1L
4203.LL20:
4204	sltsi   $r15, P1H, #0
4205	beqzs8  .LDret
4206
4207	subri   O1H, O1H, #0
4208	beqz    O1L, .LL21
4209	subri   O1L, O1L, #0
4210	subi45  O1H, #1
4211.LL21:
4212
4213.LDret:
4214	move    P1L, O1L
4215	move    P1H, O1H
4216
4217.LD999:
4218	popm    $r6, $r6
4219	pop     $lp
4220	ret5    $lp
4221
4222.LDnaninf:
4223	move    O1H, #0x80000000
4224	move    O1L, #0
4225	j       .LDret
4226	.size	__fixunsdfdi, .-__fixunsdfdi
4227#endif /* L_fixunsdfdi */
4228
4229
4230
4231#ifdef L_si_to_sf
4232
4233	.text
4234	.align	2
4235	.global	__floatsisf
4236	.type	__floatsisf, @function
4237__floatsisf:
4238	push    $lp
4239
4240	move    $r4, #0x80000000
4241	and     $r2, $r0, $r4
4242	beqz    $r0, .Li39
4243	sltsi   $r15, $r0, #0
4244	beqzs8  .Li40
4245	subri   $r0, $r0, #0
4246.Li40:
4247	move    $r1, #0x9e
4248#ifdef __NDS32_PERF_EXT__
4249	clz	$r3, $r0
4250#else
4251	pushm	$r0, $r2
4252	pushm	$r4, $r5
4253	bal	__clzsi2
4254	move	$r3, $r0
4255	popm	$r4, $r5
4256	popm	$r0, $r2
4257#endif
4258	sub     $r1, $r1, $r3
4259	sll     $r0, $r0, $r3
4260
4261	#ADD($r0, $0x80)
4262	move    $r15, #0x80
4263	add     $r0, $r0, $r15
4264	slt     $r15, $r0, $r15
4265
4266	#ADDC($r1, $0x0)
4267	add     $r1, $r1, $r15
4268	srai    $r4, $r0, #8
4269	andi    $r4, $r4, #1
4270	sub     $r0, $r0, $r4
4271	slli    $r0, $r0, #1
4272	srli    $r0, $r0, #9
4273	slli    $r4, $r1, #23
4274	or      $r0, $r0, $r4
4275.Li39:
4276	or      $r0, $r0, $r2
4277
4278.LH999:
4279	pop     $lp
4280	ret5    $lp
4281	.size	__floatsisf, .-__floatsisf
4282#endif /* L_si_to_sf */
4283
4284
4285
4286#ifdef L_si_to_df
4287
4288#ifndef __big_endian__
4289	#define O1L     $r1
4290	#define O1H     $r2
4291	#define O2L     $r4
4292	#define O2H	$r5
4293#else
4294	#define O1H     $r1
4295	#define O1L     $r2
4296	#define O2H     $r4
4297	#define O2L	$r5
4298#endif
4299	.text
4300	.align	2
4301	.global	__floatsidf
4302	.type	__floatsidf, @function
4303__floatsidf:
4304	push    $lp
4305	pushm   $r6, $r6
4306
4307	move    O1L, #0
4308	move    O2H, O1L
4309	move    $r3, O1L
4310	move    O1H, $r0
4311	beqz    O1H, .Li39
4312	sltsi   $r15, O1H, #0
4313	beqzs8  .Li40
4314	move    O2H, #0x80000000
4315
4316	subri   O1H, O1H, #0
4317	beqz    O1L, .LL71
4318	subri   O1L, O1L, #0
4319	subi45  O1H, #1
4320.LL71:
4321.Li40:
4322	move    $r3, #0x41e
4323#ifndef __big_endian__
4324#ifdef __NDS32_PERF_EXT__
4325	clz	$r4, $r2
4326#else
4327	pushm	$r0, $r3
4328	push	$r5
4329	move	$r0, $r2
4330	bal	__clzsi2
4331	move	$r4, $r0
4332	pop	$r5
4333	popm	$r0, $r3
4334#endif
4335#else /* __big_endian__ */
4336#ifdef __NDS32_PERF_EXT__
4337	clz	$r5, $r1
4338#else
4339	pushm	$r0, $r4
4340	move	$r0, $r1
4341	bal	__clzsi2
4342	move	$r5, $r0
4343	popm	$r0, $r4
4344#endif
4345#endif /* __big_endian__ */
4346	sub     $r3, $r3, O2L
4347	sll     O1H, O1H, O2L
4348.Li39:
4349	srli    O2L, O1L, #11
4350	slli    $r6, O1H, #21
4351	or      O2L, O2L, $r6
4352	slli    $r6, O1H, #1
4353	srli    $r6, $r6, #12
4354	or      O2H, O2H, $r6
4355	slli    $r6, $r3, #20
4356	or      O2H, O2H, $r6
4357	move    $r0, $r4
4358	move    $r1, $r5
4359
4360.LH999:
4361	popm    $r6, $r6
4362	pop     $lp
4363	ret5    $lp
4364	.size __floatsidf, .-__floatsidf
4365#endif /* L_si_to_df */
4366
4367
4368
4369#ifdef L_floatdisf
4370
4371#ifndef __big_endian__
4372	#define P1L     $r0
4373	#define P1H     $r1
4374	#define P2L     $r2
4375	#define P2H     $r3
4376#else
4377	#define P1H     $r0
4378	#define P1L     $r1
4379	#define P2H     $r2
4380	#define P2L     $r3
4381#endif
4382	.text
4383	.align	2
4384	.global	__floatdisf
4385	.type	__floatdisf, @function
4386__floatdisf:
4387	push    $lp
4388	pushm   $r6, $r7
4389
4390	move    $r7, #0x80000000
4391	and     $r5, P1H, $r7
4392	move    P2H, P1H
4393	move    P2L, P1L
4394	or      $r7, P1H, P1L
4395	beqz    $r7, .Li1
4396	sltsi   $r15, P1H, #0
4397	beqzs8  .Li2
4398
4399	subri   P2H, P2H, #0
4400	beqz    P2L, .LL1
4401	subri   P2L, P2L, #0
4402	subi45  P2H, #1
4403.LL1:
4404.Li2:
4405	move    $r4, #0xbe
4406
4407
4408	#NORMd($r2, $r6, P1L)
4409	bnez    P2H, .LL2
4410	bnez    P2L, .LL3
4411	move    $r4, #0
4412	j       .LL4
4413.LL3:
4414	move    P2H, P2L
4415	move    P2L, #0
4416	move    $r6, #32
4417	sub     $r4, $r4, $r6
4418.LL2:
4419#ifdef __NDS32_PERF_EXT__
4420	clz	$r6, P2H
4421#else
4422	pushm	$r0, $r5
4423	move	$r0, P2H
4424	bal	__clzsi2
4425	move	$r6, $r0
4426	popm	$r0, $r5
4427#endif
4428	beqz    $r6, .LL4
4429	sub     $r4, $r4, $r6
4430	subri   P1L, $r6, #32
4431	srl     P1L, P2L, P1L
4432	sll     P2L, P2L, $r6
4433	sll     P2H, P2H, $r6
4434	or      P2H, P2H, P1L
4435.LL4:
4436	#NORMd End
4437
4438	beqz    P2L, .Li3
4439	ori     P2H, P2H, #1
4440.Li3:
4441	#ADD(P2H, $0x80)
4442	move    $r15, #0x80
4443	add     P2H, P2H, $r15
4444	slt     $r15, P2H, $r15
4445
4446	#ADDC($r4, $0x0)
4447	add     $r4, $r4, $r15
4448	srli    $r7, P2H, #8
4449	andi    $r7, $r7, #1
4450	sub     P2H, P2H, $r7
4451	slli    P2H, P2H, #1
4452	srli    P2H, P2H, #9
4453	slli    $r7, $r4, #23
4454	or      P2H, P2H, $r7
4455.Li1:
4456	or      $r0, P2H, $r5
4457
4458.LA999:
4459	popm    $r6, $r7
4460	pop     $lp
4461	ret5    $lp
4462	.size	__floatdisf, .-__floatdisf
4463#endif /* L_floatdisf */
4464
4465
4466
4467#ifdef L_floatdidf
4468
4469#ifndef __big_endian__
4470	#define P1L     $r0
4471	#define P1H     $r1
4472	#define P2L     $r2
4473	#define P2H     $r3
4474	#define O1L     $r5
4475	#define O1H     $r6
4476#else
4477	#define P1H     $r0
4478	#define P1L     $r1
4479	#define P2H     $r2
4480	#define P2L     $r3
4481	#define O1H     $r5
4482	#define O1L     $r6
4483#endif
4484	.text
4485	.align	2
4486	.global	__floatdidf
4487	.type	__floatdidf, @function
4488__floatdidf:
4489	push    $lp
4490	pushm   $r6, $r8
4491
4492	move    $r4, #0
4493	move    $r7, $r4
4494	move    P2H, P1H
4495	move    P2L, P1L
4496	or      $r8, P1H, P1L
4497	beqz    $r8, .Li1
4498	move    $r4, #0x43e
4499	sltsi   $r15, P1H, #0
4500	beqzs8  .Li2
4501	move    $r7, #0x80000000
4502
4503	subri   P2H, P2H, #0
4504	beqz    P2L, .LL1
4505	subri   P2L, P2L, #0
4506	subi45  P2H, #1
4507.LL1:
4508
4509.Li2:
4510	#NORMd($r2, O1H, O1L)
4511	bnez    P2H, .LL2
4512	bnez    P2L, .LL3
4513	move    $r4, #0
4514	j       .LL4
4515.LL3:
4516	move    P2H, P2L
4517	move    P2L, #0
4518	move    O1H, #32
4519	sub     $r4, $r4, O1H
4520.LL2:
4521#ifdef __NDS32_PERF_EXT__
4522	clz	O1H, P2H
4523#else /* not __NDS32_PERF_EXT__ */
4524/*
4525  Replace clz with function call.
4526	clz     O1H, P2H
4527  EL:	clz     $r6, $r3
4528  EB:	clz	$r5, $r2
4529*/
4530#ifndef __big_endian__
4531	pushm	$r0, $r5
4532	move	$r0, $r3
4533	bal	__clzsi2
4534	move	$r6, $r0
4535	popm	$r0, $r5
4536#else
4537	pushm	$r0, $r4
4538	move	$r0, $r2
4539	bal	__clzsi2
4540	move	$r5, $r0
4541	popm	$r0, $r4
4542#endif
4543#endif /* not __NDS32_PERF_EXT__ */
4544	beqz    O1H, .LL4
4545	sub     $r4, $r4, O1H
4546	subri   O1L, O1H, #32
4547	srl     O1L, P2L, O1L
4548	sll     P2L, P2L, O1H
4549	sll     P2H, P2H, O1H
4550	or      P2H, P2H, O1L
4551.LL4:
4552	#NORMd End
4553
4554	#ADD(P2L, $0x400)
4555	move    $r15, #0x400
4556	add     P2L, P2L, $r15
4557	slt     $r15, P2L, $r15
4558
4559
4560	#ADDCC(P2H, $0x0)
4561	beqzs8  .LL7
4562	add     P2H, P2H, $r15
4563	slt     $r15, P2H, $r15
4564.LL7:
4565
4566	#ADDC($r4, $0x0)
4567	add     $r4, $r4, $r15
4568	srli    $r8, P2L, #11
4569	andi    $r8, $r8, #1
4570	sub     P2L, P2L, $r8
4571.Li1:
4572	srli    O1L, P2L, #11
4573	slli    $r8, P2H, #21
4574	or      O1L, O1L, $r8
4575	slli    O1H, P2H, #1
4576	srli    O1H, O1H, #12
4577	slli    $r8, $r4, #20
4578	or      O1H, O1H, $r8
4579	or      O1H, O1H, $r7
4580	move    P1L, O1L
4581	move    P1H, O1H
4582
4583.LA999:
4584	popm    $r6, $r8
4585	pop     $lp
4586	ret5    $lp
4587	.size	__floatdidf, .-__floatdidf
4588#endif /* L_floatdidf */
4589
4590
4591
4592#ifdef L_floatunsisf
4593
4594	.text
4595	.align	2
4596	.global	__floatunsisf
4597	.type	__floatunsisf, @function
4598__floatunsisf:
4599	push    $lp
4600
4601	beqz    $r0, .Li41
4602	move    $r2, #0x9e
4603#ifdef __NDS32_PERF_EXT__
4604	clz	$r1, $r0
4605#else
4606	push	$r0
4607	pushm	$r2, $r5
4608	bal	__clzsi2
4609	move	$r1, $r0
4610	popm	$r2, $r5
4611	pop	$r0
4612#endif
4613
4614	sub     $r2, $r2, $r1
4615	sll     $r0, $r0, $r1
4616
4617	#ADD($r0, $0x80)
4618	move    $r15, #0x80
4619	add     $r0, $r0, $r15
4620	slt     $r15, $r0, $r15
4621
4622	#ADDC($r2, $0x0)
4623	add     $r2, $r2, $r15
4624	srli    $r3, $r0, #8
4625	andi    $r3, $r3, #1
4626	sub     $r0, $r0, $r3
4627	slli    $r0, $r0, #1
4628	srli    $r0, $r0, #9
4629	slli    $r3, $r2, #23
4630	or      $r0, $r0, $r3
4631
4632.Li41:
4633.LI999:
4634	pop     $lp
4635	ret5    $lp
4636	.size	__floatunsisf, .-__floatunsisf
4637#endif /* L_floatunsisf */
4638
4639
4640
4641#ifdef L_floatunsidf
4642
4643#ifndef __big_endian__
4644	#define O1L     $r1
4645	#define O1H     $r2
4646	#define O2L     $r4
4647	#define O2H	$r5
4648#else
4649	#define O1H     $r1
4650	#define O1L     $r2
4651	#define O2H     $r4
4652	#define O2L	$r5
4653#endif
4654	.text
4655	.align	2
4656	.global	__floatunsidf
4657	.type	__floatunsidf, @function
4658__floatunsidf:
4659	push    $lp
4660	pushm   $r6, $r6
4661
4662	move    O1L, #0
4663	move    $r3, O1L
4664	move    O1H, $r0
4665	beqz    O1H, .Li41
4666	move    $r3, #0x41e
4667#ifndef __big_endian__
4668#ifdef __NDS32_PERF_EXT__
4669	clz	$r5, $r2
4670#else
4671	pushm	$r0, $r4
4672	move	$r0, $r2
4673	bal	__clzsi2
4674	move	$r5, $r0
4675	popm	$r0, $r4
4676#endif
4677#else /* __big_endian__ */
4678#ifdef __NDS32_PERF_EXT__
4679	clz	$r4, $r1
4680#else
4681	pushm	$r0, $r3
4682	push	$r5
4683	move	$r0, $r1
4684	bal	__clzsi2
4685	move	$r4, $r0
4686	pop	$r5
4687	popm	$r0, $r3
4688#endif
4689#endif /* __big_endian__ */
4690	sub     $r3, $r3, O2H
4691	sll     O1H, O1H, O2H
4692.Li41:
4693	srli    O2L, O1L, #11
4694	slli    $r6, O1H, #21
4695	or      O2L, O2L, $r6
4696	slli    O2H, O1H, #1
4697	srli    O2H, O2H, #12
4698	slli    $r6, $r3, #20
4699	or      O2H, O2H, $r6
4700	move    $r0, $r4
4701	move    $r1, $r5
4702
4703.LI999:
4704	popm    $r6, $r6
4705	pop     $lp
4706	ret5    $lp
4707	.size __floatunsidf, .-__floatunsidf
4708#endif /* L_floatunsidf */
4709
4710
4711
4712#ifdef L_floatundisf
4713
4714#ifndef __big_endian__
4715	#define P1L     $r0
4716	#define P1H     $r1
4717	#define P2L     $r2
4718	#define P2H     $r3
4719#else
4720	#define P1H     $r0
4721	#define P1L     $r1
4722	#define P2H     $r2
4723	#define P2L     $r3
4724#endif
4725	.text
4726	.align	2
4727	.global	__floatundisf
4728	.type	__floatundisf, @function
4729__floatundisf:
4730	push    $lp
4731	pushm   $r6, $r6
4732
4733	move    P2H, P1H
4734	move    P2L, P1L
4735	or      $r6, P1H, P1L
4736	beqz    $r6, .Li4
4737	move    $r4, #0xbe
4738
4739
4740	#NORMd($r2, $r5, P1L)
4741	bnez    P2H, .LL5
4742	bnez    P2L, .LL6
4743	move    $r4, #0
4744	j       .LL7
4745.LL6:
4746	move    P2H, P2L
4747	move    P2L, #0
4748	move    $r5, #32
4749	sub     $r4, $r4, $r5
4750.LL5:
4751#ifdef __NDS32_PERF_EXT__
4752	clz	$r5, P2H
4753#else
4754	pushm	$r0, $r4
4755	move	$r0, P2H
4756	bal	__clzsi2
4757	move	$r5, $r0
4758	popm	$r0, $r4
4759#endif
4760	beqz    $r5, .LL7
4761	sub     $r4, $r4, $r5
4762	subri   P1L, $r5, #32
4763	srl     P1L, P2L, P1L
4764	sll     P2L, P2L, $r5
4765	sll     P2H, P2H, $r5
4766	or      P2H, P2H, P1L
4767.LL7:
4768	#NORMd End
4769
4770	beqz    P2L, .Li5
4771	ori     P2H, P2H, #1
4772.Li5:
4773	#ADD(P2H, $0x80)
4774	move    $r15, #0x80
4775	add     P2H, P2H, $r15
4776	slt     $r15, P2H, $r15
4777
4778	#ADDC($r4, $0x0)
4779	add     $r4, $r4, $r15
4780	srli    $r6, P2H, #8
4781	andi    $r6, $r6, #1
4782	sub     P2H, P2H, $r6
4783	slli    P2H, P2H, #1
4784	srli    P2H, P2H, #9
4785	slli    $r6, $r4, #23
4786	or      P2H, P2H, $r6
4787.Li4:
4788	move    $r0, P2H
4789
4790.LB999:
4791	popm    $r6, $r6
4792	pop     $lp
4793	ret5    $lp
4794	.size	__floatundisf, .-__floatundisf
4795#endif /* L_floatundisf */
4796
4797
4798
4799#ifdef L_floatundidf
4800
4801#ifndef __big_endian__
4802	#define P1L     $r0
4803	#define P1H     $r1
4804	#define P2L     $r2
4805	#define P2H     $r3
4806	#define O1L     $r5
4807	#define O1H     $r6
4808#else
4809	#define P1H     $r0
4810	#define P1L     $r1
4811	#define P2H     $r2
4812	#define P2L     $r3
4813	#define O1H     $r5
4814	#define O1L     $r6
4815#endif
4816	.text
4817	.align	2
4818	.global	__floatundidf
4819	.type	__floatundidf, @function
4820__floatundidf:
4821	push    $lp
4822	pushm   $r6, $r7
4823
4824	move    $r4, #0
4825	move    P2H, P1H
4826	move    P2L, P1L
4827	or      $r7, P1H, P1L
4828	beqz    $r7, .Li3
4829	move    $r4, #0x43e
4830
4831
4832	#NORMd($r2, O1H, O1L)
4833	bnez    P2H, .LL8
4834	bnez    P2L, .LL9
4835	move    $r4, #0
4836	j       .LL10
4837.LL9:
4838	move    P2H, P2L
4839	move    P2L, #0
4840	move    O1H, #32
4841	sub     $r4, $r4, O1H
4842.LL8:
4843#ifdef __NDS32_PERF_EXT__
4844	clz	O1H, P2H
4845#else /* not __NDS32_PERF_EXT__ */
4846/*
4847  Replace clz with function call.
4848	clz     O1H, P2H
4849  EL:	clz     $r6, $r3
4850  EB:	clz	$r5, $r2
4851*/
4852#ifndef __big_endian__
4853	pushm	$r0, $r5
4854	move	$r0, $r3
4855	bal	__clzsi2
4856	move	$r6, $r0
4857	popm	$r0, $r5
4858#else
4859	pushm	$r0, $r4
4860	move	$r0, $r2
4861	bal	__clzsi2
4862	move	$r5, $r0
4863	popm	$r0, $r4
4864#endif
4865#endif /* not __NDS32_PERF_EXT__ */
4866	beqz    O1H, .LL10
4867	sub     $r4, $r4, O1H
4868	subri   O1L, O1H, #32
4869	srl     O1L, P2L, O1L
4870	sll     P2L, P2L, O1H
4871	sll     P2H, P2H, O1H
4872	or      P2H, P2H, O1L
4873.LL10:
4874	#NORMd End
4875
4876	#ADD(P2L, $0x400)
4877	move    $r15, #0x400
4878	add     P2L, P2L, $r15
4879	slt     $r15, P2L, $r15
4880
4881
4882	#ADDCC(P2H, $0x0)
4883	beqzs8  .LL13
4884	add     P2H, P2H, $r15
4885	slt     $r15, P2H, $r15
4886.LL13:
4887
4888	#ADDC($r4, $0x0)
4889	add     $r4, $r4, $r15
4890	srli    $r7, P2L, #11
4891	andi    $r7, $r7, #1
4892	sub     P2L, P2L, $r7
4893.Li3:
4894	srli    O1L, P2L, #11
4895	slli    $r7, P2H, #21
4896	or      O1L, O1L, $r7
4897	slli    O1H, P2H, #1
4898	srli    O1H, O1H, #12
4899	slli    $r7, $r4, #20
4900	or      O1H, O1H, $r7
4901	move    P1L, O1L
4902	move    P1H, O1H
4903
4904.LB999:
4905	popm    $r6, $r7
4906	pop     $lp
4907	ret5    $lp
4908	.size	__floatundidf, .-__floatundidf
4909#endif /* L_floatundidf */
4910
4911
4912
4913#ifdef L_compare_sf
4914
4915	.text
4916	.align	2
4917	.global	__cmpsf2
4918	.type	__cmpsf2, @function
4919__cmpsf2:
4920	.global	__eqsf2
4921	.type	__eqsf2, @function
4922__eqsf2:
4923	.global	__ltsf2
4924	.type	__ltsf2, @function
4925__ltsf2:
4926	.global	__lesf2
4927	.type	__lesf2, @function
4928__lesf2:
4929	.global	__nesf2
4930	.type	__nesf2, @function
4931__nesf2:
4932	move    $r4, #1
4933	j	.LA
4934
4935	.global	__gesf2
4936	.type	__gesf2, @function
4937__gesf2:
4938	.global	__gtsf2
4939	.type	__gtsf2, @function
4940__gtsf2:
4941	move	$r4, #-1
4942.LA:
4943	push    $lp
4944
4945	slli    $r2, $r0, #1
4946	slli    $r3, $r1, #1
4947	or      $r5, $r2, $r3
4948	beqz    $r5, .LMequ
4949	move    $r5, #0xff000000
4950	slt     $r15, $r5, $r2
4951	bnezs8  .LMnan
4952	slt     $r15, $r5, $r3
4953	bnezs8  .LMnan
4954	srli    $r2, $r2, #1
4955	sltsi   $r15, $r0, #0
4956	beqzs8  .Li48
4957	subri   $r2, $r2, #0
4958.Li48:
4959	srli    $r3, $r3, #1
4960	sltsi   $r15, $r1, #0
4961	beqzs8  .Li49
4962	subri   $r3, $r3, #0
4963.Li49:
4964	slts    $r15, $r2, $r3
4965	beqzs8  .Li50
4966	move    $r0, #-1
4967	j       .LM999
4968.Li50:
4969	slts    $r15, $r3, $r2
4970	beqzs8  .LMequ
4971	move    $r0, #1
4972	j       .LM999
4973
4974.LMequ:
4975	move    $r0, #0
4976
4977.LM999:
4978	pop     $lp
4979	ret5    $lp
4980
4981.LMnan:
4982	move    $r0, $r4
4983	j       .LM999
4984	.size   __cmpsf2, .-__cmpsf2
4985	.size   __eqsf2, .-__eqsf2
4986	.size   __ltsf2, .-__ltsf2
4987	.size   __lesf2, .-__lesf2
4988	.size   __nesf2, .-__nesf2
4989	.size   __gesf2, .-__gesf2
4990	.size   __gtsf2, .-__gtsf2
4991#endif /* L_compare_sf */
4992
4993
4994
4995#ifdef L_compare_df
4996
4997#ifdef __big_endian__
4998	#define P1H     $r0
4999	#define P1L     $r1
5000	#define P2H     $r2
5001	#define P2L     $r3
5002#else
5003	#define P1H     $r1
5004	#define P1L     $r0
5005	#define P2H     $r3
5006	#define P2L     $r2
5007#endif
5008	.align	2
5009	.globl	__gtdf2
5010	.globl	__gedf2
5011	.globl	__ltdf2
5012	.globl	__ledf2
5013	.globl	__eqdf2
5014	.globl	__nedf2
5015	.globl	__cmpdf2
5016	.type	__gtdf2, @function
5017	.type	__gedf2, @function
5018	.type	__ltdf2, @function
5019	.type	__ledf2, @function
5020	.type	__eqdf2, @function
5021	.type	__nedf2, @function
5022	.type	__cmpdf2, @function
5023__gtdf2:
5024__gedf2:
5025	movi	$r4, -1
5026	b	.L1
5027
5028__ltdf2:
5029__ledf2:
5030__cmpdf2:
5031__nedf2:
5032__eqdf2:
5033	movi	$r4, 1
5034.L1:
5035#if defined (__NDS32_ISA_V3M__)
5036	push25	$r10, 0
5037#else
5038	smw.adm	$r6, [$sp], $r9, 0
5039#endif
5040
5041	sethi	$r5, 0x7ff00
5042	and	$r6, P1H, $r5	! r6=aExp
5043	and	$r7, P2H, $r5	! r7=bExp
5044	slli	$r8, P1H, 12	! r8=aSig0
5045	slli	$r9, P2H, 12	! r9=bSig0
5046	beq	$r6, $r5, .L11	! aExp==0x7ff
5047	beq	$r7, $r5, .L12	! bExp==0x7ff
5048.L2:
5049	slli	$ta, P1H, 1	! ta=ahigh<<1
5050	or	$ta, P1L, $ta	!
5051	xor	$r5, P1H, P2H	! r5=ahigh^bhigh
5052	beqz	$ta, .L3	! if(ahigh<<1)==0,go .L3
5053	!-------------------------------
5054	! (ahigh<<1)!=0 || (bhigh<<1)!=0
5055	!-------------------------------
5056.L4:
5057	beqz	$r5, .L5	! ahigh==bhigh, go .L5
5058	!--------------------
5059	! a != b
5060	!--------------------
5061.L6:
5062	bltz	$r5, .L7	! if(aSign!=bSign), go .L7
5063	!--------------------
5064	! aSign==bSign
5065	!--------------------
5066	slt	$ta, $r6, $r7	! ta=(aExp<bExp)
5067	bne	$r6, $r7, .L8	! if(aExp!=bExp),go .L8
5068	slt	$ta, $r8, $r9	! ta=(aSig0<bSig0)
5069	bne	$r8, $r9, .L8	! if(aSig0!=bSig0),go .L8
5070	slt	$ta, P1L, P2L	! ta=(aSig1<bSig1)
5071.L8:
5072	beqz	$ta, .L10	! if(|a|>|b|), go .L10
5073	nor	$r0, P2H, P2H	! if(|a|<|b|),return (~yh)
5074.L14:
5075#if defined (__NDS32_ISA_V3M__)
5076	pop25	$r10, 0
5077#else
5078	lmw.bim	$r6, [$sp], $r9, 0
5079	ret
5080#endif
5081.L10:
5082	ori	$r0, P2H, 1	! return (yh|1)
5083	b	.L14
5084	!--------------------
5085	! (ahigh<<1)=0
5086	!--------------------
5087.L3:
5088	slli	$ta, P2H, 1	! ta=bhigh<<1
5089	or	$ta, P2L, $ta	!
5090	bnez	$ta, .L4	! ta=(bhigh<<1)!=0,go .L4
5091.L5:
5092	xor	$ta, P1L, P2L	! ta=alow^blow
5093	bnez	$ta, .L6	! alow!=blow,go .L6
5094	movi	$r0, 0		! a==b, return 0
5095	b	.L14
5096	!--------------------
5097	! aExp=0x7ff;
5098	!--------------------
5099.L11:
5100	or	P1L, P1L, $r8	! x1=(aSig0|aSig1)
5101	bnez	P1L, .L13	! if(a=nan), go.L13
5102	xor	$ta, $r7, $r5	! ta=(bExp^0x7ff)
5103	bnez	$ta, .L2	! if(bExp!=0x7ff), go .L2
5104	!--------------------
5105	! bExp=0x7ff;
5106	!--------------------
5107.L12:
5108	or	$ta, P2L, $r9	! ta=(bSig0|bSig1)
5109	beqz	$ta, .L2	! if(b!=nan), go .L2
5110.L13:
5111	move	$r0, $r4
5112	b	.L14
5113	!--------------------
5114	! aSign!=bSign
5115	!--------------------
5116.L7:
5117	ori	$r0, P1H, 1	! if(aSign!=bSign), return (ahigh|1)
5118	b	.L14
5119
5120	.size	__gtdf2, .-__gtdf2
5121	.size	__gedf2, .-__gedf2
5122	.size	__ltdf2, .-__ltdf2
5123	.size	__ledf2, .-__ledf2
5124	.size	__eqdf2, .-__eqdf2
5125	.size	__nedf2, .-__nedf2
5126	.size	__cmpdf2, .-__cmpdf2
5127#endif /* L_compare_df */
5128
5129
5130
5131#ifdef L_unord_sf
5132
5133	.text
5134	.align	2
5135	.global	__unordsf2
5136	.type	__unordsf2, @function
5137__unordsf2:
5138	push    $lp
5139
5140	slli    $r2, $r0, #1
5141	move    $r3, #0xff000000
5142	slt     $r15, $r3, $r2
5143	beqzs8  .Li52
5144	move    $r0, #1
5145	j       .LP999
5146.Li52:
5147	slli    $r2, $r1, #1
5148	move    $r3, #0xff000000
5149	slt     $r15, $r3, $r2
5150	beqzs8  .Li53
5151	move    $r0, #1
5152	j       .LP999
5153.Li53:
5154	move    $r0, #0
5155
5156.LP999:
5157	pop     $lp
5158	ret5    $lp
5159	.size	__unordsf2, .-__unordsf2
5160#endif /* L_unord_sf */
5161
5162
5163
5164#ifdef L_unord_df
5165
5166#ifndef __big_endian__
5167	#define P1L     $r0
5168	#define P1H     $r1
5169	#define P2L     $r2
5170	#define P2H     $r3
5171#else
5172	#define P1H     $r0
5173	#define P1L     $r1
5174	#define P2H     $r2
5175	#define P2L     $r3
5176#endif
5177	.text
5178	.align	2
5179	.global	__unorddf2
5180	.type	__unorddf2, @function
5181__unorddf2:
5182	push    $lp
5183
5184	slli    $r4, P1H, #1
5185	beqz    P1L, .Li66
5186	addi    $r4, $r4, #1
5187.Li66:
5188	move    $r5, #0xffe00000
5189	slt     $r15, $r5, $r4
5190	beqzs8  .Li67
5191	move    $r0, #1
5192	j       .LR999
5193.Li67:
5194	slli    $r4, P2H, #1
5195	beqz    P2L, .Li68
5196	addi    $r4, $r4, #1
5197.Li68:
5198	move    $r5, #0xffe00000
5199	slt     $r15, $r5, $r4
5200	beqzs8  .Li69
5201	move    $r0, #1
5202	j       .LR999
5203.Li69:
5204	move    $r0, #0
5205
5206.LR999:
5207	pop     $lp
5208	ret5    $lp
5209	.size __unorddf2, .-__unorddf2
5210#endif /* L_unord_df */
5211/* ------------------------------------------- */
5212/* DPBIT floating point operations for libgcc  */
5213/* ------------------------------------------- */
5214