; libgcc1 routines for Synopsys DesignWare ARC cpu.

/* Copyright (C) 1995-2018 Free Software Foundation, Inc.
   Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
		on behalf of Synopsys Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* As a special exception, if you link this library with other files,
   some of which are compiled with GCC, to produce an executable,
   this library does not by itself cause the resulting executable
   to be covered by the GNU General Public License.
   This exception does not however invalidate any other reasons why
   the executable file might be covered by the GNU General Public License.  */


 /* ANSI concatenation macros.  */

 #define CONCAT1(a, b) CONCAT2(a, b)
 #define CONCAT2(a, b) a ## b

 /* Use the right prefix for global labels.  */

 #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

#ifndef WORKING_ASSEMBLER
#define abs_l abs
#define asl_l asl
#define mov_l mov
#endif

#define FUNC(X)         .type SYM(X),@function
#define HIDDEN_FUNC(X)	FUNC(X)` .hidden X
#define ENDFUNC0(X)     .Lfe_##X: .size X,.Lfe_##X-X
#define ENDFUNC(X)      ENDFUNC0(X)



#ifdef  L_mulsi3
	.section .text
	.align 4

	.global SYM(__mulsi3)
SYM(__mulsi3):

/* This is the simple version.

  while (a)
    {
      if (a & 1)
	r += b;
      a >>= 1;
      b <<= 1;
    }
*/

#if defined (__ARC_MUL64__)
	FUNC(__mulsi3)
	mulu64 r0,r1
	j_s.d [blink]
	mov_s r0,mlo
	ENDFUNC(__mulsi3)
#elif defined (__ARC_MPY__)
	HIDDEN_FUNC(__mulsi3)
	mpyu	r0,r0,r1
	nop_s
	j_s	[blink]
	ENDFUNC(__mulsi3)
#elif defined (__ARC_NORM__)
	FUNC(__mulsi3)
	norm.f	r2,r0
	rsub	lp_count,r2,31
	mov.mi	lp_count,32
	mov_s	r2,r0
	mov_s	r0,0
	lpnz	@.Lend		; loop is aligned
	lsr.f	r2,r2
	add.cs	r0,r0,r1
	add_s	r1,r1,r1
.Lend:	j_s [blink]
	ENDFUNC(__mulsi3)
#elif !defined (__OPTIMIZE_SIZE__) && defined (__ARC_BARREL_SHIFTER__)
	/* Up to 3.5 times faster than the simpler code below, but larger.  */
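/* A conceptual C sketch of the unrolled loop below: it consumes the
   multiplier four bits at a time, adding in b shifted by 0..3 as selected
   by those bits (an illustration only, not a line-by-line match for the
   flag-driven code that follows):

     r = 0;
     while (a)
       {
	 if (a & 8) r += b << 3;
	 if (a & 4) r += b << 2;
	 if (a & 2) r += b << 1;
	 if (a & 1) r += b;
	 a >>= 4;
	 b <<= 4;
       }
*/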
	FUNC(__mulsi3)
	ror.f	r2,r0,4
	mov_s	r0,0
	add3.mi	r0,r0,r1
	asl.f	r2,r2,2
	add2.cs	r0,r0,r1
	jeq_s	[blink]
.Loop:
	add1.mi	r0,r0,r1
	asl.f	r2,r2,2
	add.cs	r0,r0,r1
	asl_s	r1,r1,4
	ror.f	r2,r2,8
	add3.mi	r0,r0,r1
	asl.f	r2,r2,2
	bne.d	.Loop
	add2.cs	r0,r0,r1
	j_s	[blink]
	ENDFUNC(__mulsi3)
#elif !defined (__OPTIMIZE_SIZE__) /* __ARC601__ */
	FUNC(__mulsi3)
	lsr.f r2,r0
	mov_s r0,0
	mov_s r3,0
	add.cs r0,r0,r1
.Loop:
	lsr.f r2,r2
	add1.cs r0,r0,r1
	lsr.f r2,r2
	add2.cs r0,r0,r1
	lsr.f r2,r2
	add3.cs r0,r0,r1
	bne.d .Loop
	add3 r1,r3,r1
	j_s	[blink]
	ENDFUNC(__mulsi3)
#else
/********************************************************/
	FUNC(__mulsi3)
	mov_s r2,0		; Accumulate result here.
.Lloop:
	bbit0 r0,0,@.Ly
	add_s r2,r2,r1		; r += b
.Ly:
	lsr_s r0,r0		; a >>= 1
	asl_s r1,r1		; b <<= 1
	brne_s r0,0,@.Lloop
.Ldone:
	j_s.d [blink]
	mov_s r0,r2
	ENDFUNC(__mulsi3)
/********************************************************/
#endif

#endif /* L_mulsi3 */
158
159#ifdef  L_umulsidi3
160	.section .text
161	.align 4
162
163	.global SYM(__umulsidi3)
164SYM(__umulsidi3):
165	HIDDEN_FUNC(__umulsidi3)
166/* We need ARC700 /ARC_MUL64 definitions of __umulsidi3 / __umulsi3_highpart
167   in case some code has been compiled without multiply support enabled,
168   but linked with the multiply-support enabled libraries.
169   For ARC601 (i.e. without a barrel shifter), we also use umuldisi3 as our
170   umulsi3_highpart implementation; the use of the latter label doesn't
171   actually benefit ARC601 platforms, but is useful when ARC601 code is linked
172   against other libraries.  */
#if defined (__ARC_MPY__) || defined (__ARC_MUL64__) \
	|| !defined (__ARC_BARREL_SHIFTER__)
	.global SYM(__umulsi3_highpart)
SYM(__umulsi3_highpart):
	HIDDEN_FUNC(__umulsi3_highpart)
#endif

/* This is the simple version; for the 32x32->64 bit multiply, the
   accumulator r and the shifted multiplicand b below are 64 bit wide.

  while (a)
    {
      if (a & 1)
	r += b;
      a >>= 1;
      b <<= 1;
    }
*/
#include "ieee-754/arc-ieee-754.h"

#ifdef __ARC_MPY__
	mov_s	r12,DBL0L
	mpyu	DBL0L,r12,DBL0H
	j_s.d	[blink]
	MPYHU	DBL0H,r12,DBL0H
#elif defined (__ARC_MUL64__)
/* Likewise for __ARC_MUL64__ */
	mulu64 r0,r1
	mov_s DBL0L,mlo
	j_s.d [blink]
	mov_s DBL0H,mhi
#else /* !__ARC_MPY__ && !__ARC_MUL64__ */
/* Although it might look tempting to extend this to handle muldi3,
   using mulsi3 twice with 2.25 cycles per 32 bit add is faster
   than one loop with three or four cycles per 32 bit add.  */
	asl.f r12,0		; Top part of b.
	mov_s r2,0		; Accumulate result here.
	bbit1.d r0,0,@.Ladd
	mov_s r3,0
.Llooptst:
	rlc r12,r12
	breq r0,0,@.Ldone	; while (a)
.Lloop:
	asl.f r1,r1		; b <<= 1
	bbit0.d r0,1,@.Llooptst
	lsr r0,r0		; a >>= 1
	rlc r12,r12
.Ladd:
	add.f r3,r3,r1	; r += b
	brne.d r0,0,@.Lloop	; while (a);
	adc   r2,r2,r12
.Ldone:
	mov_s DBL0L,r3
	j_s.d [blink]
	mov DBL0H,r2
#endif /* !__ARC_MPY__ && !__ARC_MUL64__ */
	ENDFUNC(__umulsidi3)
#if defined (__ARC_MPY__) || defined (__ARC_MUL64__) \
	|| !defined (__ARC_BARREL_SHIFTER__)
	ENDFUNC(__umulsi3_highpart)
#endif
#endif /* L_umulsidi3 */

#ifdef  L_umulsi3_highpart
#include "ieee-754/arc-ieee-754.h"
/* For use without a barrel shifter, and for ARC700 / ARC_MUL64, the
   mulsidi3 algorithms above look better, so for these, there is an
   extra label up there.  */
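/* A rough C model of the loop below (an illustrative sketch, not the exact
   register-level algorithm; the real code keeps the running high word in r2,
   folds the carry of each add back in with rrc, and compensates for the
   early loop exit with the final lsr):

     unsigned __umulsi3_highpart (unsigned a, unsigned b)
     {
       unsigned hi = 0;
       int n = 32;

       if (!a)
	 return 0;
       while (a)
	 {
	   unsigned long long t = hi + (unsigned long long) (a & 1 ? b : 0);
	   a >>= 1;
	   n--;
	   hi = t >> 1;		// rrc: the add's carry-out becomes bit 31
	 }
       return hi >> n;		// undo the shifts that were not performed
     }
*/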
#if !defined (__ARC_MPY__) && !defined (__ARC_MUL64__) \
	&& defined (__ARC_BARREL_SHIFTER__)
	.global SYM(__umulsi3_highpart)
SYM(__umulsi3_highpart):
	HIDDEN_FUNC(__umulsi3_highpart)
	mov_s r2,0
	mov_s r3,32
.Loop:
	lsr.f r0,r0
	add.cs.f r2,r2,r1
	sub_s r3,r3,1
	brne.d r0,0,.Loop
	rrc r2,r2
	j_s.d	[blink]
/* Make the result register peephole-compatible with mulsidi3.  */
	lsr DBL0H,r2,r3
	ENDFUNC(__umulsi3_highpart)
#endif /* !__ARC_MPY__  && __ARC_BARREL_SHIFTER__ */
#endif /* L_umulsi3_highpart */

#ifdef L_divmod_tools

; Utilities used by all routines.

	.section .text

/*
unsigned long
udivmodsi4(int modwanted, unsigned long num, unsigned long den)
{
  unsigned long bit = 1;
  unsigned long res = 0;

  while (den < num && bit && !(den & (1L<<31)))
    {
      den <<=1;
      bit <<=1;
    }
  while (bit)
    {
      if (num >= den)
	{
	  num -= den;
	  res |= bit;
	}
      bit >>=1;
      den >>=1;
    }
  if (modwanted) return num;
  return res;
}
*/

; inputs: r0 = numerator, r1 = denominator
; outputs: r0 = quotient, r1 = remainder, r2/r3 trashed

	.balign 4
	.global SYM(__udivmodsi4)
	FUNC(__udivmodsi4)
SYM(__udivmodsi4):

#if defined (__ARC_EA__)
/* Normalize divisor and dividend, and then use the appropriate number of
   divaw (the number of result bits, or one more) to produce the result.
   There are some special conditions that need to be tested:
   - We can only directly normalize unsigned numbers that fit in 31 bits.  For
     the divisor, we test early on that it is not 'negative'.
   - divaw can't correctly process a dividend that is larger than the divisor.
     We handle this by checking that the dividend prior to normalization is
     not larger than the normalized divisor.  As we already know then
     that the divisor fits in 31 bits, this check also makes sure that the
     dividend fits.
   - Ordinary normalization of the dividend could make it larger than the
     normalized divisor, which again would be unsuitable for divaw.
     Thus, we want to shift the dividend left by one less, except that we
     want to leave it alone if it is already 31 bits wide.  To this end, we
     double the input to norm with adds.
   - If the dividend has fewer bits than the divisor, that would leave us
     with a negative number of divaw steps to execute.  Although we could use
     a conditional loop to avoid excess divaw steps, and then the quotient
     could be extracted correctly as there'd be more than enough zero bits,
     the remainder would be shifted left too far, requiring a conditional
     shift right.  That shift and the possible mispredict on the conditional
     loop cost as much as putting in an early check for a zero result.  */
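/* For reference, the divaw step used in the loops below is assumed to act
   roughly like this C sketch (one restoring division step; this is an
   illustration of the assumed instruction behaviour, not something taken
   from these sources):

     unsigned divaw (unsigned acc, unsigned den)
     {
       acc <<= 1;		// make room for the next quotient bit
       if (acc >= den)
	 acc = (acc - den) | 1;	// subtract and record a quotient bit
       return acc;
     }

   After the normalization described above, the quotient bits thus collect
   in the low end of the accumulator while the remainder stays in the high
   end.  */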
	bmsk	r3,r0,29
	brne.d	r3,r0,.Large_dividend
	norm.f	r2,r1
	brlo	r0,r1,.Lret0
	norm	r3,r0
	asl_s	r1,r1,r2
	sub_s	r3,r3,1
	asl_l	r0,r0,r3	; not short to keep loop aligned
	sub	lp_count,r2,r3
	lp	.Ldiv_end
	divaw	r0,r0,r1
.Ldiv_end:sub_s	r3,r2,1
	lsr	r1,r0,r2
	j_s.d	[blink]
	bmsk	r0,r0,r3

	.balign 4
.Large_dividend:
	bmi	.Ltrivial
	asl_s	r1,r1,r2
	mov_s	r3,0
	sub1.f	r4,r0,r1
	mov.lo	r4,r0
	mov.hs	r3,2
	cmp	r4,r1
	sub.hs	r4,r4,r1
	add.hs	r3,r3,1
	mov.f	lp_count,r2
	lpne	.Ldiv_end2
	divaw	r4,r4,r1
.Ldiv_end2:asl	r0,r3,r2
	lsr	r1,r4,r2
	sub_s	r2,r2,1
	bmsk	r4,r4,r2
	j_s.d	[blink]
	or.ne	r0,r0,r4

.Lret0:
	mov_s	r1,r0
	j_s.d	[blink]
	mov_l	r0,0
	.balign	4
.Ltrivial:
	sub.f	r1,r0,r1
	mov.c	r1,r0
	mov_s	r0,1
	j_s.d	[blink]
	mov.c	r0,0
#elif !defined (__OPTIMIZE_SIZE__) && !defined (__ARC_RF16__)
#if defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
	lsr_s r2,r0
	brhs.d r1,r2,.Lret0_3
	norm r2,r2
	norm r3,r1
	sub_s r3,r3,r2
	asl_s r1,r1,r3
	sub1.f 0,r0,r1
	lsr.cs r1,r1,1
	sbc r2,r3,0
	sub1 r0,r0,r1
	cmp_s r0,r1
	mov.f lp_count,r2
#else /* ! __ARC_NORM__ */
	lsr_s r2,r0
	brhs.d r1,r2,.Lret0_3
	mov lp_count,32
.Lloop1:
	asl_s r1,r1		; den <<= 1
	brls.d r1,r2,@.Lloop1
	sub lp_count,lp_count,1
	sub_s r0,r0,r1
	lsr_s r1,r1
	cmp_s r0,r1
	xor.f r2,lp_count,31
#if !defined (__ARCEM__) && !defined (__ARCHS__)
	mov_s lp_count,r2
#else
	mov lp_count,r2
	nop_s
#endif /* !__ARCEM__ && !__ARCHS__ */
#endif /* !__ARC_NORM__ */
	sub.cc r0,r0,r1
	mov_s r3,3
	sbc r3,r3,0
#if defined (__ARC_BARREL_SHIFTER__)
	asl_s r3,r3,r2
	rsub r1,r1,1
	lpne @.Lloop2_end
	add1.f r0,r1,r0
	sub.cc r0,r0,r1
.Lloop2_end:
	lsr r1,r0,r2
#else
	rsub r1,r1,1
	lpne @.Lloop2_end
	asl_s r3,r3
	add1.f r0,r1,r0
	sub.cc r0,r0,r1
.Lloop2_end:
	lsr_s r1,r0
	lsr.f lp_count,r2
	mov.cc r1,r0
	lpnz 1f
	lsr_s r1,r1
	lsr_s r1,r1
1:
#endif
	bmsk r0,r0,r2
	bclr r0,r0,r2
	j_s.d [blink]
	or_s r0,r0,r3
.Lret0_3:
#if 0 /* Slightly shorter, but slower.  */
	lp .Loop3_end
	brhi.d r1,r0,.Loop3_end
	sub_s r0,r0,r1
.Loop3_end:
	add_s r1,r1,r0
	j_s.d [blink]
	rsub r0,lp_count,32-1
#else
	mov_s r4,r1
	sub.f r1,r0,r1
	sbc r0,r0,r0
	sub.cc.f r1,r1,r4
	sbc r0,r0,0
	sub.cc.f r1,r1,r4
	sbc r0,r0,-3
	j_s.d [blink]
	add.cs r1,r1,r4
#endif
#else /* Arctangent-A5 */
	breq_s r1,0,@.Ldivmodend
	mov_s r2,1		; bit = 1
	mov_s r3,0		; res = 0
.Lloop1:
	brhs r1,r0,@.Lloop2
	bbit1 r1,31,@.Lloop2
	asl_s r1,r1		; den <<= 1
	b.d @.Lloop1
	asl_s r2,r2		; bit <<= 1
.Lloop2:
	brlo r0,r1,@.Lshiftdown
	sub_s r0,r0,r1		; num -= den
	or_s r3,r3,r2		; res |= bit
.Lshiftdown:
	lsr_s r2,r2		; bit >>= 1
	lsr_s r1,r1		; den >>= 1
	brne_s r2,0,@.Lloop2
.Ldivmodend:
	mov_s r1,r0		; r1 = mod
	j.d [blink]
	mov_s r0,r3		; r0 = res
/******************************************************/
#endif
	ENDFUNC(__udivmodsi4)

#endif

#ifdef  L_udivsi3
	.section .text
	.align 4

	.global SYM(__udivsi3)
	FUNC(__udivsi3)
SYM(__udivsi3):
	b @SYM(__udivmodsi4)
	ENDFUNC(__udivsi3)
#if 0 /* interferes with linux loader */
	.section .__arc_profile_forward, "a"
	.long SYM(__udivsi3)
	.long SYM(__udivmodsi4)
	.long 65536
#endif

#endif /* L_udivsi3 */

#ifdef  L_divsi3
	.section .text
	.align 4

	.global SYM(__divsi3)
	FUNC(__divsi3)

#ifndef __ARC_EA__
SYM(__divsi3):
	/* A5 / ARC600 / ARC601 */
	mov r12,blink
	xor r11,r0,r1
	abs_s r0,r0
	bl.d @SYM(__udivmodsi4)
	abs_s r1,r1
	tst r11,r11
	j.d [r12]
	neg.mi r0,r0
#else	/* __ARC_EA__ */
	;; We can use the abs, norm, divaw and mpy instructions for ARC700
#define MULDIV
#ifdef MULDIV
/* This table has been generated by divtab-arc700.c.  */
/* 1/512 .. 1/256, normalized.  There is a leading 1 in bit 31.
   For powers of two, we list unnormalized numbers instead.  The values
   for powers of 2 are loaded, but not used.  The value for 1 is actually
   the first instruction after __muldiv.  */
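/* A sketch of how the non-power-of-two entries are assumed to be derived
   (divtab-arc700.c is the real generator; k denotes the bit number of the
   most significant set bit of the divisor d):

     entry (d) = ceil (2^(32 + k) / d)	// normalized: bit 31 is set

   __muldiv below then recovers the quotient of the absolute values as

     |a| / d  ==  mpyhu (|a|, entry (d)) >> k

   i.e. the upper 32 bits of the 32x32 product, shifted right by k, and
   finally negates the result if the signs of the original operands
   differ.  */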
	.balign 4
.Ldivtab:

	.long	0x1000000
	.long	0x80808081
	.long	0x81020409
	.long	0x81848DA9
	.long	0x82082083
	.long	0x828CBFBF
	.long	0x83126E98
	.long	0x83993053
	.long	0x84210843
	.long	0x84A9F9C9
	.long	0x85340854
	.long	0x85BF3762
	.long	0x864B8A7E
	.long	0x86D90545
	.long	0x8767AB60
	.long	0x87F78088
	.long	0x88888889
	.long	0x891AC73B
	.long	0x89AE408A
	.long	0x8A42F871
	.long	0x8AD8F2FC
	.long	0x8B70344B
	.long	0x8C08C08D
	.long	0x8CA29C05
	.long	0x8D3DCB09
	.long	0x8DDA5203
	.long	0x8E78356E
	.long	0x8F1779DA
	.long	0x8FB823EF
	.long	0x905A3864
	.long	0x90FDBC0A
	.long	0x91A2B3C5
	.long	0x92492493
	.long	0x92F11385
	.long	0x939A85C5
	.long	0x94458095
	.long	0x94F20950
	.long	0x95A02569
	.long	0x964FDA6D
	.long	0x97012E03
	.long	0x97B425EE
	.long	0x9868C80A
	.long	0x991F1A52
	.long	0x99D722DB
	.long	0x9A90E7DA
	.long	0x9B4C6F9F
	.long	0x9C09C09D
	.long	0x9CC8E161
	.long	0x9D89D89E
	.long	0x9E4CAD24
	.long	0x9F1165E8
	.long	0x9FD809FE
	.long	0xA0A0A0A1
	.long	0xA16B312F
	.long	0xA237C32C
	.long	0xA3065E40
	.long	0xA3D70A3E
	.long	0xA4A9CF1E
	.long	0xA57EB503
	.long	0xA655C43A
	.long	0xA72F053A
	.long	0xA80A80A9
	.long	0xA8E83F58
	.long	0xA9C84A48
	.long	0xAAAAAAAB
	.long	0xAB8F69E3
	.long	0xAC769185
	.long	0xAD602B59
	.long	0xAE4C415D
	.long	0xAF3ADDC7
	.long	0xB02C0B03
	.long	0xB11FD3B9
	.long	0xB21642C9
	.long	0xB30F6353
	.long	0xB40B40B5
	.long	0xB509E68B
	.long	0xB60B60B7
	.long	0xB70FBB5B
	.long	0xB81702E1
	.long	0xB92143FB
	.long	0xBA2E8BA3
	.long	0xBB3EE722
	.long	0xBC52640C
	.long	0xBD691048
	.long	0xBE82FA0C
	.long	0xBFA02FE9
	.long	0xC0C0C0C1
	.long	0xC1E4BBD6
	.long	0xC30C30C4
	.long	0xC4372F86
	.long	0xC565C87C
	.long	0xC6980C6A
	.long	0xC7CE0C7D
	.long	0xC907DA4F
	.long	0xCA4587E7
	.long	0xCB8727C1
	.long	0xCCCCCCCD
	.long	0xCE168A78
	.long	0xCF6474A9
	.long	0xD0B69FCC
	.long	0xD20D20D3
	.long	0xD3680D37
	.long	0xD4C77B04
	.long	0xD62B80D7
	.long	0xD79435E6
	.long	0xD901B204
	.long	0xDA740DA8
	.long	0xDBEB61EF
	.long	0xDD67C8A7
	.long	0xDEE95C4D
	.long	0xE070381D
	.long	0xE1FC780F
	.long	0xE38E38E4
	.long	0xE525982B
	.long	0xE6C2B449
	.long	0xE865AC7C
	.long	0xEA0EA0EB
	.long	0xEBBDB2A6
	.long	0xED7303B6
	.long	0xEF2EB720
	.long	0xF0F0F0F1
	.long	0xF2B9D649
	.long	0xF4898D60
	.long	0xF6603D99
	.long	0xF83E0F84
	.long	0xFA232CF3
	.long	0xFC0FC0FD
	.long	0xFE03F810
	.long	0x2000000
	.long	0x81020409
	.long	0x82082083
	.long	0x83126E98
	.long	0x84210843
	.long	0x85340854
	.long	0x864B8A7E
	.long	0x8767AB60
	.long	0x88888889
	.long	0x89AE408A
	.long	0x8AD8F2FC
	.long	0x8C08C08D
	.long	0x8D3DCB09
	.long	0x8E78356E
	.long	0x8FB823EF
	.long	0x90FDBC0A
	.long	0x92492493
	.long	0x939A85C5
	.long	0x94F20950
	.long	0x964FDA6D
	.long	0x97B425EE
	.long	0x991F1A52
	.long	0x9A90E7DA
	.long	0x9C09C09D
	.long	0x9D89D89E
	.long	0x9F1165E8
	.long	0xA0A0A0A1
	.long	0xA237C32C
	.long	0xA3D70A3E
	.long	0xA57EB503
	.long	0xA72F053A
	.long	0xA8E83F58
	.long	0xAAAAAAAB
	.long	0xAC769185
	.long	0xAE4C415D
	.long	0xB02C0B03
	.long	0xB21642C9
	.long	0xB40B40B5
	.long	0xB60B60B7
	.long	0xB81702E1
	.long	0xBA2E8BA3
	.long	0xBC52640C
	.long	0xBE82FA0C
	.long	0xC0C0C0C1
	.long	0xC30C30C4
	.long	0xC565C87C
	.long	0xC7CE0C7D
	.long	0xCA4587E7
	.long	0xCCCCCCCD
	.long	0xCF6474A9
	.long	0xD20D20D3
	.long	0xD4C77B04
	.long	0xD79435E6
	.long	0xDA740DA8
	.long	0xDD67C8A7
	.long	0xE070381D
	.long	0xE38E38E4
	.long	0xE6C2B449
	.long	0xEA0EA0EB
	.long	0xED7303B6
	.long	0xF0F0F0F1
	.long	0xF4898D60
	.long	0xF83E0F84
	.long	0xFC0FC0FD
	.long	0x4000000
	.long	0x82082083
	.long	0x84210843
	.long	0x864B8A7E
	.long	0x88888889
	.long	0x8AD8F2FC
	.long	0x8D3DCB09
	.long	0x8FB823EF
	.long	0x92492493
	.long	0x94F20950
	.long	0x97B425EE
	.long	0x9A90E7DA
	.long	0x9D89D89E
	.long	0xA0A0A0A1
	.long	0xA3D70A3E
	.long	0xA72F053A
	.long	0xAAAAAAAB
	.long	0xAE4C415D
	.long	0xB21642C9
	.long	0xB60B60B7
	.long	0xBA2E8BA3
	.long	0xBE82FA0C
	.long	0xC30C30C4
	.long	0xC7CE0C7D
	.long	0xCCCCCCCD
	.long	0xD20D20D3
	.long	0xD79435E6
	.long	0xDD67C8A7
	.long	0xE38E38E4
	.long	0xEA0EA0EB
	.long	0xF0F0F0F1
	.long	0xF83E0F84
	.long	0x8000000
	.long	0x84210843
	.long	0x88888889
	.long	0x8D3DCB09
	.long	0x92492493
	.long	0x97B425EE
	.long	0x9D89D89E
	.long	0xA3D70A3E
	.long	0xAAAAAAAB
	.long	0xB21642C9
	.long	0xBA2E8BA3
	.long	0xC30C30C4
	.long	0xCCCCCCCD
	.long	0xD79435E6
	.long	0xE38E38E4
	.long	0xF0F0F0F1
	.long	0x10000000
	.long	0x88888889
	.long	0x92492493
	.long	0x9D89D89E
	.long	0xAAAAAAAB
	.long	0xBA2E8BA3
	.long	0xCCCCCCCD
	.long	0xE38E38E4
	.long	0x20000000
	.long	0x92492493
	.long	0xAAAAAAAB
	.long	0xCCCCCCCD
	.long	0x40000000
	.long	0xAAAAAAAB
	.long	0x80000000
__muldiv:
	neg	r4,r2
	ld.as	r5,[pcl,r4]
	abs_s	r12,r0
	bic.f	0,r2,r4
	mpyhu.ne r12,r12,r5
	norm	r3,r2
	xor.f	0,r0,r1
	; write port allocation stall
	rsub	r3,r3,30
	lsr	r0,r12,r3
	j_s.d	[blink]
	neg.mi	r0,r0

	.balign	4
SYM(__divsi3):
	norm	r3,r1
	abs_s	r2,r1
	brhs	r3,23,__muldiv
	norm	r4,r0
	abs_l	r12,r0
	brhs	r4,r3,.Lonebit
	asl_s	r2,r2,r3
	asl	r12,r12,r4
	sub	lp_count,r3,r4
	sub.f	r12,r12,r2
	brge.d	r12,r2,.Lsbit
	sub	r4,r3,r4
	add.lo	r12,r12,r2
	lp	.Ldivend
.Ldivstart:divaw r12,r12,r2
.Ldivend:xor_s	r1,r1,r0
	sub	r0,r4,1
	bmsk	r0,r12,r0
	bset.hs	r0,r0,r4
	tst_s	r1,r1
	j_s.d	[blink]
	neg.mi	r0,r0
.Lonebit:
	xor_s	r1,r1,r0
	asr_s	r1,r1,31
	sub1.f	0,r12,r2	; special case:	-2**(n+1) / 2**n
	or	r0,r1,1
	add.eq	r0,r0,r0
	cmp_s	r12,r2
	j_s.d	[blink]
	mov.lo	r0,0
.Lsbit:
	; Need to handle special cases involving negative powers of two:
	; r12,r2 are normalized dividend / divisor;
	; divide anything by 0x80000000, or divide 0x80000000 by 0x40000000
	add_s	r12,r12,r2
	xor_s	r1,r1,r0
	rsub	r4,r4,-1
	ror	r0,r12,r4
	tst_s	r2,r2
	bmsk	r0,r0,r3
	add.pl	r0,r0,r0
	tst_s	r1,r1
	j_s.d	[blink]
	neg.mi	r0,r0
#else /* !MULDIV */
/* This version requires that divaw works with a divisor of 0x80000000U  */
	abs_s	r2,r1
	norm	r4,r0
	neg_s	r3,r2
	norm	r3,r3
	abs_s	r12,r0
	brhs	r4,r3,.Lonebit
	asl_s	r2,r2,r3
	asl	r12,r12,r4
	sub	lp_count,r3,r4
	cmp_s	r12,r2
	sub.hs	r12,r12,r2
	lp	.Ldivend
.Ldivstart:divaw r12,r12,r2
.Ldivend:xor_s	r1,r1,r0
	sub_s	r0,r3,1
	bmsk	r0,r12,r0
	bset.hs	r0,r0,r3
	tst_s	r1,r1
	j_s.d	[blink]
	neg.mi	r0,r0
.Lonebit:
	xor_s	r1,r1,r0
	asr_s	r1,r1,31
	cmp_s	r12,r2
	mov_s	r0,0
	j_s.d	[blink]
	or.hs	r0,r1,1
#endif /* MULDIV */

#endif	/* !__ARC_EA__ */
	ENDFUNC(__divsi3)


#endif /* L_divsi3 */

#ifdef  L_umodsi3
	.section .text
	.align 4

	.global SYM(__umodsi3)
	FUNC(__umodsi3)
SYM(__umodsi3):
	mov r7,blink
	bl.nd @SYM(__udivmodsi4)
	j.d [r7]
	mov r0,r1
	ENDFUNC(__umodsi3)
#if 0 /* interferes with linux loader */
	.section .__arc_profile_forward, "a"
	.long SYM(__umodsi3)
	.long SYM(__udivmodsi4)
	.long 65536
#endif

#endif /* L_umodsi3 */

#ifdef  L_modsi3
	.section .text
	.align 4

	.global SYM (__modsi3)
	FUNC(__modsi3)
SYM(__modsi3):
#ifndef __ARC_EA__
	/* A5 / ARC600 / ARC601 */
	mov_s r12,blink
	mov_s r11,r0
	abs_s r0,r0
	bl.d @SYM(__udivmodsi4)
	abs_s r1,r1
	tst r11,r11
	neg_s r0,r1
	j_s.d [r12]
	mov.pl r0,r1
#else /* __ARC_EA__ */
	abs_s	r2,r1
	norm.f	r4,r0
	neg	r5,r2
	norm	r3,r5
	abs_l	r12,r0
	brhs	r4,r3,.Lonebit
	asl_s	r2,r2,r3
	asl	r12,r12,r4
	sub	lp_count,r3,r4
	cmp_s	r12,r2
	sub.hs	r12,r12,r2
	tst_s	r0,r0
	lp	.Ldivend
.Ldivstart:divaw r12,r12,r2
.Ldivend:
	lsr	r0,r12,r3
	j_s.d	[blink]
	neg.mi	r0,r0
	.balign	4
.Lonebit:neg.pl	r5,r5
	cmp_s	r12,r2
	j_s.d	[blink]
	sub.hs	r0,r0,r5
#endif /* !__ARC_EA__ */
	ENDFUNC(__modsi3)

#endif /* L_modsi3 */

#ifdef L_clzsi2
       .section .text
       .align 4
       .global SYM (__clzsi2)
SYM(__clzsi2):
#ifdef __ARC_NORM__
	HIDDEN_FUNC(__clzsi2)
	norm.f	r0,r0
	mov.n	r0,0
	j_s.d	[blink]
	add.pl	r0,r0,1
	ENDFUNC(__clzsi2)
#elif !defined (__ARC_BARREL_SHIFTER__)
	FUNC(__clzsi2)
	mov lp_count,10
	mov_l r1,0
	bset r2,r1,29
	lp .Loop_end
	brhs r0,r2,.Loop_end
	add3 r0,r1,r0
.Loop_end:
	asl.f 0,r0
	sub2 r0,lp_count,lp_count
	sub.cs.f r0,r0,1
	add r0,r0,31
	j_s.d [blink]
	add.pl r0,r0,1
	ENDFUNC(__clzsi2)
#else
	FUNC(__clzsi2)
	asl.f 0,r0,2
	mov r1,-1
.Lcheck:
	bbit1.d r0,31,.Ldone
	asl.pl r0,r0,3
	bcs.d .Ldone_1
	add_s r1,r1,3
	bpnz.d .Lcheck
	asl.f 0,r0,2
	mov_s r0,32
	j_s.d [blink]
	mov.ne r0,r1
.Ldone:
	j_s.d [blink]
	add_s r0,r1,1
.Ldone_1:
	j_s.d [blink]
	sub_s r0,r1,1
	ENDFUNC(__clzsi2)
#endif
#endif /* L_clzsi2 */
       .section .text


;;; MILLICODE THUNK LIB ;***************

;;; 	.macro push_regs from, to, offset
;;; 		st_s "\from", [sp, \offset]
;;; 		.if \to-\from
;;; 			push_regs "(\from+1)", \to, "(\offset+4)"
;;; 		.endif
;;; 	.endm
;;; 	push_regs 13, 18, 0
;;;

;;;;   	.macro sum from, to, three
;;;;   		.long \from
;;;;   		.long \three
;;;;   		.local regno
;;;;   		.set regno, \from+1
;;;;   		.set shift, 32
;;;;   		.set shift, shift - 1
;;;;   #		st_s %shift @3 lsl #shift
;;;;   		.if \to-\from
;;;;   		sum "(\from+1)", \to, "(\three)"
;;;;   		.endif
;;;;   	.endm
;;;;
;;;;   	SUM 0,5, 9
;;;;
;	.altmacro
;;  	.macro push_regs from=0, to=3, offset
;;  		st_s r\from, [sp, \offset]
;;  		.if \to-\from
;;  			push_regs "\from+1 ",\to,"(\offset+4)"
;;  		.endif
;;  	.endm
;;
;;  	.macro expand_to_push from=13, to
;;  ;		.section .text
;;  ;		.align 4
;;  ;		.global st_
;;  ;		.type foo,
;;  	st_13_to_25:
;;  ;		push_regs \from, \to, 0
;;  	push_regs 0,3		;
;;  	.endm
;;
;;  	expand_to_push 13,18
;;
;#endif

#ifdef L_millicodethunk_st
	.section .text
	.align 4
	.global SYM(__st_r13_to_r15)
	.global SYM(__st_r13_to_r16)
	.global SYM(__st_r13_to_r17)
	.global SYM(__st_r13_to_r18)
	.global SYM(__st_r13_to_r19)
	.global SYM(__st_r13_to_r20)
	.global SYM(__st_r13_to_r21)
	.global SYM(__st_r13_to_r22)
	.global SYM(__st_r13_to_r23)
	.global SYM(__st_r13_to_r24)
	.global SYM(__st_r13_to_r25)
	HIDDEN_FUNC(__st_r13_to_r15)
	HIDDEN_FUNC(__st_r13_to_r16)
	HIDDEN_FUNC(__st_r13_to_r17)
	HIDDEN_FUNC(__st_r13_to_r18)
	HIDDEN_FUNC(__st_r13_to_r19)
	HIDDEN_FUNC(__st_r13_to_r20)
	HIDDEN_FUNC(__st_r13_to_r21)
	HIDDEN_FUNC(__st_r13_to_r22)
	HIDDEN_FUNC(__st_r13_to_r23)
	HIDDEN_FUNC(__st_r13_to_r24)
	HIDDEN_FUNC(__st_r13_to_r25)
	.align 4
SYM(__st_r13_to_r25):
	st r25, [sp,48]
SYM(__st_r13_to_r24):
	st r24, [sp,44]
SYM(__st_r13_to_r23):
	st r23, [sp,40]
SYM(__st_r13_to_r22):
	st r22, [sp,36]
SYM(__st_r13_to_r21):
	st r21, [sp,32]
SYM(__st_r13_to_r20):
	st r20, [sp,28]
SYM(__st_r13_to_r19):
	st r19, [sp,24]
SYM(__st_r13_to_r18):
	st r18, [sp,20]
SYM(__st_r13_to_r17):
	st r17, [sp,16]
SYM(__st_r13_to_r16):
	st r16, [sp,12]
SYM(__st_r13_to_r15):
#ifdef __ARC700__
	st r15, [sp,8] ; minimum function size to avoid stall: 6 bytes.
#else
	st_s r15, [sp,8]
#endif
	st_s r14, [sp,4]
	j_s.d [%blink]
	st_s r13, [sp,0]
	ENDFUNC(__st_r13_to_r15)
	ENDFUNC(__st_r13_to_r16)
	ENDFUNC(__st_r13_to_r17)
	ENDFUNC(__st_r13_to_r18)
	ENDFUNC(__st_r13_to_r19)
	ENDFUNC(__st_r13_to_r20)
	ENDFUNC(__st_r13_to_r21)
	ENDFUNC(__st_r13_to_r22)
	ENDFUNC(__st_r13_to_r23)
	ENDFUNC(__st_r13_to_r24)
	ENDFUNC(__st_r13_to_r25)
#endif  /* L_millicodethunk_st */


#ifdef L_millicodethunk_ld
	.section .text
	.align 4
;	==================================
;	the loads

	.global SYM(__ld_r13_to_r15)
	.global SYM(__ld_r13_to_r16)
	.global SYM(__ld_r13_to_r17)
	.global SYM(__ld_r13_to_r18)
	.global SYM(__ld_r13_to_r19)
	.global SYM(__ld_r13_to_r20)
	.global SYM(__ld_r13_to_r21)
	.global SYM(__ld_r13_to_r22)
	.global SYM(__ld_r13_to_r23)
	.global SYM(__ld_r13_to_r24)
	.global SYM(__ld_r13_to_r25)
	HIDDEN_FUNC(__ld_r13_to_r15)
	HIDDEN_FUNC(__ld_r13_to_r16)
	HIDDEN_FUNC(__ld_r13_to_r17)
	HIDDEN_FUNC(__ld_r13_to_r18)
	HIDDEN_FUNC(__ld_r13_to_r19)
	HIDDEN_FUNC(__ld_r13_to_r20)
	HIDDEN_FUNC(__ld_r13_to_r21)
	HIDDEN_FUNC(__ld_r13_to_r22)
	HIDDEN_FUNC(__ld_r13_to_r23)
	HIDDEN_FUNC(__ld_r13_to_r24)
	HIDDEN_FUNC(__ld_r13_to_r25)
SYM(__ld_r13_to_r25):
	ld r25, [sp,48]
SYM(__ld_r13_to_r24):
	ld r24, [sp,44]
SYM(__ld_r13_to_r23):
	ld r23, [sp,40]
SYM(__ld_r13_to_r22):
	ld r22, [sp,36]
SYM(__ld_r13_to_r21):
	ld r21, [sp,32]
SYM(__ld_r13_to_r20):
	ld r20, [sp,28]
SYM(__ld_r13_to_r19):
	ld r19, [sp,24]
SYM(__ld_r13_to_r18):
	ld r18, [sp,20]
SYM(__ld_r13_to_r17):
	ld r17, [sp,16]
SYM(__ld_r13_to_r16):
	ld r16, [sp,12]
SYM(__ld_r13_to_r15):
#ifdef __ARC700__
	ld r15, [sp,8] ; minimum function size to avoid stall: 6 bytes.
#else
	ld_s r15, [sp,8]
#endif
	ld_s r14, [sp,4]
	j_s.d [%blink]
	ld_s r13, [sp,0]
	ENDFUNC(__ld_r13_to_r15)
	ENDFUNC(__ld_r13_to_r16)
	ENDFUNC(__ld_r13_to_r17)
	ENDFUNC(__ld_r13_to_r18)
	ENDFUNC(__ld_r13_to_r19)
	ENDFUNC(__ld_r13_to_r20)
	ENDFUNC(__ld_r13_to_r21)
	ENDFUNC(__ld_r13_to_r22)
	ENDFUNC(__ld_r13_to_r23)
	ENDFUNC(__ld_r13_to_r24)
	ENDFUNC(__ld_r13_to_r25)

#endif /* L_millicodethunk_ld */
#ifdef L_millicodethunk_ret
	.global SYM(__ld_r13_to_r14_ret)
	.global SYM(__ld_r13_to_r15_ret)
	.global SYM(__ld_r13_to_r16_ret)
	.global SYM(__ld_r13_to_r17_ret)
	.global SYM(__ld_r13_to_r18_ret)
	.global SYM(__ld_r13_to_r19_ret)
	.global SYM(__ld_r13_to_r20_ret)
	.global SYM(__ld_r13_to_r21_ret)
	.global SYM(__ld_r13_to_r22_ret)
	.global SYM(__ld_r13_to_r23_ret)
	.global SYM(__ld_r13_to_r24_ret)
	.global SYM(__ld_r13_to_r25_ret)
	HIDDEN_FUNC(__ld_r13_to_r14_ret)
	HIDDEN_FUNC(__ld_r13_to_r15_ret)
	HIDDEN_FUNC(__ld_r13_to_r16_ret)
	HIDDEN_FUNC(__ld_r13_to_r17_ret)
	HIDDEN_FUNC(__ld_r13_to_r18_ret)
	HIDDEN_FUNC(__ld_r13_to_r19_ret)
	HIDDEN_FUNC(__ld_r13_to_r20_ret)
	HIDDEN_FUNC(__ld_r13_to_r21_ret)
	HIDDEN_FUNC(__ld_r13_to_r22_ret)
	HIDDEN_FUNC(__ld_r13_to_r23_ret)
	HIDDEN_FUNC(__ld_r13_to_r24_ret)
	HIDDEN_FUNC(__ld_r13_to_r25_ret)
	.section .text
	.align 4
SYM(__ld_r13_to_r25_ret):
	ld r25, [sp,48]
SYM(__ld_r13_to_r24_ret):
	ld r24, [sp,44]
SYM(__ld_r13_to_r23_ret):
	ld r23, [sp,40]
SYM(__ld_r13_to_r22_ret):
	ld r22, [sp,36]
SYM(__ld_r13_to_r21_ret):
	ld r21, [sp,32]
SYM(__ld_r13_to_r20_ret):
	ld r20, [sp,28]
SYM(__ld_r13_to_r19_ret):
	ld r19, [sp,24]
SYM(__ld_r13_to_r18_ret):
	ld r18, [sp,20]
SYM(__ld_r13_to_r17_ret):
	ld r17, [sp,16]
SYM(__ld_r13_to_r16_ret):
	ld r16, [sp,12]
SYM(__ld_r13_to_r15_ret):
	ld r15, [sp,8]
SYM(__ld_r13_to_r14_ret):
	ld blink,[sp,r12]
	ld_s r14, [sp,4]
	ld.ab r13, [sp,r12]
	j_s.d [%blink]
	add_s sp,sp,4
	ENDFUNC(__ld_r13_to_r14_ret)
	ENDFUNC(__ld_r13_to_r15_ret)
	ENDFUNC(__ld_r13_to_r16_ret)
	ENDFUNC(__ld_r13_to_r17_ret)
	ENDFUNC(__ld_r13_to_r18_ret)
	ENDFUNC(__ld_r13_to_r19_ret)
	ENDFUNC(__ld_r13_to_r20_ret)
	ENDFUNC(__ld_r13_to_r21_ret)
	ENDFUNC(__ld_r13_to_r22_ret)
	ENDFUNC(__ld_r13_to_r23_ret)
	ENDFUNC(__ld_r13_to_r24_ret)
	ENDFUNC(__ld_r13_to_r25_ret)

#endif /* L_millicodethunk_ret */

#define ARC_OPTFPE (defined (__ARC700__) || defined (__ARC_FPX_QUARK__))

#if ARC_OPTFPE
#ifdef  L_adddf3
#ifdef __ARC_NORM__
#include "ieee-754/adddf3.S"
#endif
#endif

#ifdef  L_muldf3
#ifdef __ARC_MPY__
#include "ieee-754/muldf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/muldf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/muldf3.S"
#endif
#endif

#ifdef  L_addsf3
#ifdef __ARC_NORM__
#include "ieee-754/addsf3.S"
#endif
#endif

#ifdef  L_mulsf3
#ifdef  __ARC_MPY__
#include "ieee-754/mulsf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/mulsf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/mulsf3.S"
#elif defined (__ARC_NORM__)
#include "ieee-754/arc600/mulsf3.S"
#endif
#endif

#ifdef  L_divdf3
#ifdef  __ARC_MPY__
#include "ieee-754/divdf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/divdf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/divdf3.S"
#endif
#endif

#ifdef  L_divsf3
#ifdef  __ARC_MPY__
#include "ieee-754/divsf3-stdmul.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/divsf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/divsf3.S"
#elif defined (__ARC_NORM__)
#include "ieee-754/arc600/divsf3.S"
#endif
#endif

#ifdef L_extendsfdf2
#ifdef __ARC_NORM__
#include "ieee-754/extendsfdf2.S"
#endif
#endif

#ifdef L_truncdfsf2
#ifdef __ARC_NORM__
#include "ieee-754/truncdfsf2.S"
#endif
#endif

#ifdef L_floatsidf
#ifdef __ARC_NORM__
#include "ieee-754/floatsidf.S"
#endif
#endif

#ifdef L_floatsisf
#ifdef __ARC_NORM__
#include "ieee-754/floatsisf.S"
#endif
#endif

#ifdef L_floatunsidf
#ifdef __ARC_NORM__
#include "ieee-754/floatunsidf.S"
#endif
#endif

#ifdef L_fixdfsi
#ifdef __ARC_NORM__
#include "ieee-754/fixdfsi.S"
#endif
#endif

#ifdef L_fixsfsi
#ifdef __ARC_NORM__
#include "ieee-754/fixsfsi.S"
#endif
#endif

#ifdef L_fixunsdfsi
#ifdef __ARC_NORM__
#include "ieee-754/fixunsdfsi.S"
#endif
#endif

#ifdef L_eqdf2
#ifdef __ARC_NORM__
#include "ieee-754/eqdf2.S"
#endif
#endif

#ifdef L_eqsf2
#ifdef __ARC_NORM__
#include "ieee-754/eqsf2.S"
#endif
#endif

#ifdef L_gtdf2
#ifdef __ARC_NORM__
#include "ieee-754/gtdf2.S"
#endif
#endif

#ifdef L_gtsf2
#ifdef __ARC_NORM__
#include "ieee-754/gtsf2.S"
#endif
#endif

#ifdef L_gedf2
#ifdef __ARC_NORM__
#include "ieee-754/gedf2.S"
#endif
#endif

#ifdef L_gesf2
#ifdef __ARC_NORM__
#include "ieee-754/gesf2.S"
#endif
#endif

#ifdef L_uneqdf2
#ifdef __ARC_NORM__
#include "ieee-754/uneqdf2.S"
#endif
#endif

#ifdef L_uneqsf2
#ifdef __ARC_NORM__
#include "ieee-754/uneqsf2.S"
#endif
#endif

#ifdef L_orddf2
#ifdef __ARC_NORM__
#include "ieee-754/orddf2.S"
#endif
#endif

#ifdef L_ordsf2
#ifdef __ARC_NORM__
#include "ieee-754/ordsf2.S"
#endif
#endif
#endif /* ARC_OPTFPE */