163d1a8abSmrg/* Libgcc Target specific implementation.
2*ec02198aSmrg   Copyright (C) 2012-2020 Free Software Foundation, Inc.
363d1a8abSmrg   Contributed by KPIT Cummins Infosystems Limited.
463d1a8abSmrg
563d1a8abSmrg   This file is part of GCC.
663d1a8abSmrg
763d1a8abSmrg   GCC is free software; you can redistribute it and/or modify it under
863d1a8abSmrg   the terms of the GNU General Public License as published by the Free
963d1a8abSmrg   Software Foundation; either version 3, or (at your option) any later
1063d1a8abSmrg   version.
1163d1a8abSmrg
1263d1a8abSmrg   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
1363d1a8abSmrg   WARRANTY; without even the implied warranty of MERCHANTABILITY or
1463d1a8abSmrg   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
1563d1a8abSmrg   for more details.
1663d1a8abSmrg
1763d1a8abSmrg   Under Section 7 of GPL version 3, you are granted additional
1863d1a8abSmrg   permissions described in the GCC Runtime Library Exception, version
1963d1a8abSmrg   3.1, as published by the Free Software Foundation.
2063d1a8abSmrg
2163d1a8abSmrg   You should have received a copy of the GNU General Public License and
2263d1a8abSmrg   a copy of the GCC Runtime Library Exception along with this program;
2363d1a8abSmrg   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
2463d1a8abSmrg   <http://www.gnu.org/licenses/>.  */
2563d1a8abSmrg
2663d1a8abSmrg#ifdef  L_mulsi3
2763d1a8abSmrg	.text
2863d1a8abSmrg	.align  4
2963d1a8abSmrg	.globl  ___mulsi3

/* 32-bit (SImode) multiply: result = param1 * param2.
   Schoolbook decomposition on 16-bit words: full product of the two
   low words, plus both cross products added into the high word.  The
   high*high product only affects bits above 31 and is omitted.
   NOTE(review): register roles below assume param1 arrives in (r3,r2)
   and param2 in (r5,r4), with the result returned in (r1,r0) --
   confirm against the CR16 calling convention.  */
3063d1a8abSmrg___mulsi3:
3163d1a8abSmrg	movw    r4,r0		/* r0 = low word of param2 */
3263d1a8abSmrg	movw    r2,r1		/* r1 = low word of param1 */
3363d1a8abSmrg	/* Extended multiplication between the 2 lower words */
3463d1a8abSmrg	muluw   r1,(r1,r0)	/* (r1,r0) = r0 * r1, full 32-bit product */
3563d1a8abSmrg
3663d1a8abSmrg	/* Multiply the lower word of each parameter  */
3763d1a8abSmrg	mulw    r2,r5		/* r5 = low(param1) * high(param2) */
3863d1a8abSmrg
3963d1a8abSmrg	/* With the higher word of the other  */
4063d1a8abSmrg	mulw    r3,r4		/* r4 = high(param1) * low(param2) */
4163d1a8abSmrg
4263d1a8abSmrg	/* Add products to the higher part of the final result  */
4363d1a8abSmrg	addw    r4,r1
4463d1a8abSmrg	addw    r5,r1
4563d1a8abSmrg	jump    (ra)		/* Return; result in (r1,r0) */
4663d1a8abSmrg#endif
4763d1a8abSmrg
4863d1a8abSmrg#ifdef L_divdi3
4963d1a8abSmrg	.text
5063d1a8abSmrg	.align 4
5163d1a8abSmrg	.globl ___divdi3
5263d1a8abSmrg
/* Signed 64-bit divide: result = param1 / param2.
   Both 64-bit operands are on the stack, low dword first.  The sign
   of the result is tracked in r10 ("neg", toggled once per negative
   operand); both operands are replaced by their absolute values,
   copied into a 16-byte scratch area, and handed to the unsigned
   worker ___udivmoddi3 (r2 = 0 selects the quotient).  The result in
   (r3,r2):(r1,r0) is negated afterwards when neg is set.
   NOTE(review): the post-call negation reuses (r9,r8) as 0xFFFFFFFF
   and r10 as neg, i.e. it relies on ___udivmoddi3 preserving r8-r10
   -- confirm against that routine's prologue.  */
5363d1a8abSmrg___divdi3:
5463d1a8abSmrg	push	$4, r7, ra
5563d1a8abSmrg
5663d1a8abSmrg	/* Param #1 Long Long low bit first */
5763d1a8abSmrg	loadd   12(sp), (r1, r0)
5863d1a8abSmrg	loadd   16(sp), (r3, r2)
5963d1a8abSmrg
6063d1a8abSmrg	/* Param #2 Long Long low bit first */
6163d1a8abSmrg	loadd   20(sp), (r5, r4)
6263d1a8abSmrg	loadd   24(sp), (r7, r6)
6363d1a8abSmrg
6463d1a8abSmrg	/* Set neg to 0 */
6563d1a8abSmrg	movw $0, r10
6663d1a8abSmrg
	/* Scratch area for the two operands passed to ___udivmoddi3 */
6763d1a8abSmrg	subd $16, (sp)
6863d1a8abSmrg
6963d1a8abSmrg	/* Compare if param1 is greater than 0 */
7063d1a8abSmrg	cmpw $0, r3		/* Sign of param1 is in r3, its top word */
7163d1a8abSmrg	ble L4			/* Non-negative: skip the negation */
7263d1a8abSmrg
7363d1a8abSmrg	/* Invert param1 and neg */
7463d1a8abSmrg	movd $-1, (r9, r8) 	/* Temp set to FFFFFFFF */
7563d1a8abSmrg	xord (r9, r8), (r1, r0)	/* Xor low bits of param 1 with temp */
7663d1a8abSmrg	xord (r9, r8), (r3, r2)	/* Xor high bits of param 1 with temp */
7763d1a8abSmrg	addd $1, (r1, r0)	/* Add 1 to low bits of param 1 */
7863d1a8abSmrg	xorw $1, r10		/* Invert neg (must not clobber the carry) */
7963d1a8abSmrg	bcc L4			/* If no carry occurred go to L4 */
8063d1a8abSmrg	addd $1, (r3, r2)	/* Add 1 to high bits of param 1 */
8163d1a8abSmrg
8263d1a8abSmrgL4:	stord (r1, r0), 0(sp)
8363d1a8abSmrg	stord (r3, r2), 4(sp)
8463d1a8abSmrg
8563d1a8abSmrg	/* Compare if param2 is greater than 0 */
8663d1a8abSmrg	cmpw $0, r7
8763d1a8abSmrg	ble L5			/* Non-negative: skip the negation */
8863d1a8abSmrg
8963d1a8abSmrg	/* Invert param2 and neg */
9063d1a8abSmrg	movd $-1, (r9, r8)	/* Temp set to FFFFFFFF */
9163d1a8abSmrg	xord (r9, r8), (r5, r4)	/* Xor low bits of param 2 with temp */
9263d1a8abSmrg	xord (r9, r8), (r7, r6)	/* Xor high bits of param 2 with temp */
9363d1a8abSmrg	addd $1, (r5, r4)	/* Add 1 to low bits of param 2 */
9463d1a8abSmrg	xorw $1, r10		/* Invert neg */
9563d1a8abSmrg	bcc L5			/* If no carry occurred go to L5 */
9663d1a8abSmrg	addd $1, (r7, r6)	/* Add 1 to high bits of param 2 */
9763d1a8abSmrg
9863d1a8abSmrgL5:	stord (r5, r4), 8(sp)
9963d1a8abSmrg	stord (r7, r6), 12(sp)
10063d1a8abSmrg	movw $0, r2		/* Mode 0: ___udivmoddi3 returns the quotient */
10163d1a8abSmrg
10263d1a8abSmrg	/* Call udivmoddi3 */
10363d1a8abSmrg#ifdef __PIC__
10463d1a8abSmrg	loadd	___udivmoddi3@cGOT(r12), (r1,r0)
10563d1a8abSmrg	jal	(r1,r0)
10663d1a8abSmrg#else
10763d1a8abSmrg	bal (ra), ___udivmoddi3
10863d1a8abSmrg#endif
10963d1a8abSmrg
11063d1a8abSmrg	/* If (neg) */
11163d1a8abSmrg	addd	$16, (sp)	/* Release the scratch area */
11263d1a8abSmrg	cmpw $0, r10		/* Compare 0 with neg */
11363d1a8abSmrg	beq Lexit__
11463d1a8abSmrg
	/* Two's-complement negate the 64-bit quotient: invert, add 1,
	   propagate the carry into the high dword.  */
11563d1a8abSmrg	/* Neg = -Neg */
11663d1a8abSmrg	xord (r9, r8), (r1, r0)	/* Xor low bits of ures with temp */
11763d1a8abSmrg	xord (r9, r8), (r3, r2)	/* Xor high bits of ures with temp */
11863d1a8abSmrg	addd $1, (r1, r0)	/* Add 1 to low bits of ures */
11963d1a8abSmrg	bcc Lexit__
12063d1a8abSmrg	addd $1, (r3, r2)	/* Add 1 to high bit of ures */
12163d1a8abSmrg
12263d1a8abSmrgLexit__:
12363d1a8abSmrg#  ifdef __ID_SHARED_LIB__
	/* NOTE(review): no matching push of r12 is visible in this
	   routine -- confirm the ID-shared-library prologue elsewhere.  */
12463d1a8abSmrg	pop	$2, r12
12563d1a8abSmrg#  endif
12663d1a8abSmrg	popret $4, r7, ra
12763d1a8abSmrg#endif
12863d1a8abSmrg
12963d1a8abSmrg#ifdef L_lshrdi3
13063d1a8abSmrg	.text
13163d1a8abSmrg	.align 4
13263d1a8abSmrg	.globl ___lshrdi3
13363d1a8abSmrg
/* 64-bit logical shift right: result = param1 >> count.
   The 64-bit value is on the stack (low dword first); the shift
   count arrives in r2.  The count is negated up front because CR16
   lshd shifts left for positive counts and right for negative counts
   (see the "lshd $-1 ... Shift right" uses in ___udivmoddi3), so the
   normal positive-count case takes the L2 path below.  Bits crossing
   the 32-bit dword boundary are extracted into (r9,r8) and merged.
   Result returned in (r3,r2):(r1,r0).  */
13463d1a8abSmrg___lshrdi3:
13563d1a8abSmrg	push	$3, r7
13663d1a8abSmrg
13763d1a8abSmrg	/* Load parameters from stack in this order */
13863d1a8abSmrg	movw r2, r6		/* Number of shifts */
13963d1a8abSmrg	loadd	6(sp), (r1, r0)	/* Low bits */
14063d1a8abSmrg	loadd	10(sp), (r3, r2)/* High bits */
14163d1a8abSmrg
14263d1a8abSmrg	xorw $-1, r6		/* Invert number of shifts */
14363d1a8abSmrg	addw $1, r6		/* Complete two's complement: r6 = -count */
14463d1a8abSmrg
14563d1a8abSmrg	movw r6, r7		/* Copy number of shifts */
14663d1a8abSmrg
14763d1a8abSmrg	tbit $15, r6		/* Test if number is negative */
14863d1a8abSmrg	bfs L2			/* If negative (original count > 0) jump to L2 */
14963d1a8abSmrg
	/* Fall-through: original count was negative, so shift LEFT.  */
15063d1a8abSmrg	movd (r1, r0), (r9, r8)	/* Copy low bits */
15163d1a8abSmrg
15263d1a8abSmrg	subw $32, r7		/* Calc how many bits will overflow */
15363d1a8abSmrg	/* Shift the temp low bit to the right to see the overflowing bits  */
15463d1a8abSmrg	lshd r7, (r9, r8)
15563d1a8abSmrg
15663d1a8abSmrg	cmpw $32, r6		/* If number of shifts is higher than 31 */
15763d1a8abSmrg	blt L1			/* Shift by moving */
15863d1a8abSmrg
15963d1a8abSmrg	lshd r6, (r3, r2)	/* Shift high bits */
16063d1a8abSmrg	lshd r6, (r1, r0)	/* Shift low bits */
16163d1a8abSmrg	addd (r9, r8), (r3, r2)	/* Add overflow to the high bits */
16263d1a8abSmrg	popret	$3, r7		/* Return */
16363d1a8abSmrg
16463d1a8abSmrgL1:	movd $0, (r1, r0)	/* Reset low bit */
16563d1a8abSmrg	movd (r9, r8), (r3, r2)	/* Add the overflow from the low bit */
16663d1a8abSmrg	popret	$3, r7		/* Return */
16763d1a8abSmrg
	/* Normal case: original count positive, shift RIGHT.  */
16863d1a8abSmrgL2:	movd (r3, r2), (r9, r8)	/* Copy high bits */
16963d1a8abSmrg
17063d1a8abSmrg	addw $32, r7		/* r7 = 32 - count: left-shift amount for overflow */
17163d1a8abSmrg	/* Shift the temp low bit to the left to see the overflowing bits  */
17263d1a8abSmrg	lshd r7, (r9, r8)
17363d1a8abSmrg
17463d1a8abSmrg	cmpw $-32, r6		/* If number of shifts is lower than -31 */
17563d1a8abSmrg	bgt L3			/* Shift by moving */
17663d1a8abSmrg
17763d1a8abSmrg	lshd r6, (r1, r0)	/* Shift low bits */
17863d1a8abSmrg	lshd r6, (r3, r2)	/* Shift high bits */
17963d1a8abSmrg	addd (r9, r8), (r1, r0)	/* Add overflow to the low bits */
18063d1a8abSmrg	popret	$3, r7		/* Return */
18163d1a8abSmrg
18263d1a8abSmrgL3:	movd $0, (r3, r2)	/* Reset the high bit */
18363d1a8abSmrg	movd (r9, r8), (r1, r0)	/* Add the overflow from the high bit */
18463d1a8abSmrg	popret	$3, r7		/* Return */
18563d1a8abSmrg#endif
18663d1a8abSmrg
18763d1a8abSmrg#ifdef L_moddi3
18863d1a8abSmrg	.text
18963d1a8abSmrg	.align 4
19063d1a8abSmrg	.globl ___moddi3
19163d1a8abSmrg
/* Signed 64-bit modulo: result = param1 % param2.
   Mirrors ___divdi3 but calls ___udivmoddi3 with r2 = 1 to select
   the remainder, and keeps the "neg" flag in a stack slot (16(sp))
   instead of a register.  Only param1's sign sets neg: the sign of
   a C remainder follows the dividend, so param2's sign is ignored
   for the final negation.  */
19263d1a8abSmrg___moddi3:
19363d1a8abSmrg	push	$4, r7, ra
19463d1a8abSmrg
19563d1a8abSmrg	/* Param #1 Long Long low bit first */
19663d1a8abSmrg	loadd	12(sp), (r1, r0)
19763d1a8abSmrg	loadd	16(sp), (r3, r2)
19863d1a8abSmrg
19963d1a8abSmrg	/* Param #2 Long Long low bit first */
20063d1a8abSmrg	loadd	20(sp), (r5, r4)
20163d1a8abSmrg	loadd	24(sp), (r7, r6)
20263d1a8abSmrg
	/* 16 bytes of operand scratch for ___udivmoddi3 + 2 bytes for neg */
20363d1a8abSmrg	subd	$18, (sp)
20463d1a8abSmrg
20563d1a8abSmrg	/* Set neg to 0 */
20663d1a8abSmrg	storw $0, 16(sp)
20763d1a8abSmrg
20863d1a8abSmrg	movd	$-1, (r9, r8) 	/* Temp set to FFFFFFFF */
20963d1a8abSmrg
21063d1a8abSmrg	/* Compare if param1 is greater than 0 */
21163d1a8abSmrg	cmpw $0, r3
21263d1a8abSmrg	ble L4			/* Non-negative: skip the negation */
21363d1a8abSmrg
21463d1a8abSmrg	/* Invert param1 and neg */
21563d1a8abSmrg	xord (r9, r8), (r1, r0)	/* Xor low bits of param 1 with temp */
21663d1a8abSmrg	xord (r9, r8), (r3, r2)	/* Xor high bits of param 1 with temp */
21763d1a8abSmrg	addd $1, (r1, r0)	/* Add 1 to low bits of param 1 */
21863d1a8abSmrg	storw $1, 16(sp)	/* neg = 1: the remainder must be negated */
21963d1a8abSmrg	bcc L4			/* If no carry occurred go to L4 */
22063d1a8abSmrg	addd $1, (r3, r2)	/* Add 1 to high bits of param 1 */
22163d1a8abSmrg
22263d1a8abSmrgL4:	stord (r1, r0), 0(sp)
22363d1a8abSmrg	stord (r3, r2), 4(sp)
22463d1a8abSmrg
22563d1a8abSmrg	/* Compare if param2 is greater than 0 */
22663d1a8abSmrg	cmpw $0, r7
22763d1a8abSmrg	ble L5			/* Non-negative: skip the negation */
22863d1a8abSmrg
	/* param2's sign deliberately does NOT toggle neg: the C
	   remainder takes the dividend's sign only.  */
22963d1a8abSmrg	/* Invert param2 and neg */
23063d1a8abSmrg	xord (r9, r8), (r5, r4)	/* Xor low bits of param 2 with temp */
23163d1a8abSmrg	xord (r9, r8), (r7, r6)	/* Xor high bits of param 2 with temp */
23263d1a8abSmrg	addd $1, (r5, r4)	/* Add 1 to low bits of param 2 */
23363d1a8abSmrg	bcc L5			/* If no carry occurred go to L5 */
23463d1a8abSmrg	addd $1, (r7, r6)	/* Add 1 to high bits of param 2 */
23563d1a8abSmrg
23663d1a8abSmrgL5:	stord (r5, r4), 8(sp)
23763d1a8abSmrg	stord (r7, r6), 12(sp)
23863d1a8abSmrg	movw $1, r2		/* Mode 1: ___udivmoddi3 returns the remainder */
23963d1a8abSmrg
24063d1a8abSmrg	/* Call udivmoddi3 */
24163d1a8abSmrg#ifdef __PIC__
24263d1a8abSmrg	loadd	___udivmoddi3@cGOT(r12), (r1,r0)
24363d1a8abSmrg	jal	(r1,r0)
24463d1a8abSmrg#else
24563d1a8abSmrg	bal (ra), ___udivmoddi3
24663d1a8abSmrg#endif
24763d1a8abSmrg
24863d1a8abSmrg	/* If (neg) */
24963d1a8abSmrg	loadw 16(sp), r10	/* Load neg from stack */
25063d1a8abSmrg	addd	$18, (sp)	/* Release the scratch area */
25163d1a8abSmrg	cmpw $0, r10		/* Compare 0 with neg */
25263d1a8abSmrg	beq	Lexit__
25363d1a8abSmrg
	/* Two's-complement negate the 64-bit remainder.
	   NOTE(review): (r9,r8) is assumed to still hold 0xFFFFFFFF
	   across the ___udivmoddi3 call -- confirm it is preserved by
	   that routine's prologue.  */
25463d1a8abSmrg	/* Neg = -Neg */
25563d1a8abSmrg	xord (r9, r8), (r1, r0)	/* Xor low bits of ures with temp */
25663d1a8abSmrg	xord (r9, r8), (r3, r2)	/* Xor high bits of ures with temp */
25763d1a8abSmrg	addd $1, (r1, r0)	/* Add 1 to low bits of ures */
25863d1a8abSmrg	bcc	Lexit__
25963d1a8abSmrg	addd $1, (r3, r2)	/* Add 1 to high bit of ures */
26063d1a8abSmrgLexit__:
26163d1a8abSmrg#  ifdef __ID_SHARED_LIB__
26263d1a8abSmrg	pop	$2, r12
26363d1a8abSmrg#  endif
26463d1a8abSmrg	popret	$4, r7, ra
26563d1a8abSmrg#endif
26663d1a8abSmrg
26763d1a8abSmrg#ifdef L_muldi3
26863d1a8abSmrg	.text
26963d1a8abSmrg	.align 4
27063d1a8abSmrg	.globl ___muldi3
27163d1a8abSmrg
/* Signed 64-bit multiply: result = low 64 bits of param1 * param2.
   Operands on the stack, low dword first; result in (r3,r2):(r1,r0).
   Takes absolute values (tracking the result sign in "neg"), builds
   the product from 16-bit partial products with macuw/muluw, then
   negates the result when the operand signs differed.  Partial
   products that only affect bits above 63 are skipped.  */
27263d1a8abSmrg___muldi3:
27363d1a8abSmrg	push	$2, r13
27463d1a8abSmrg	push	$7, r7
27563d1a8abSmrg
27663d1a8abSmrg	/* Param #1 Long Long low bit first */
27763d1a8abSmrg	loadd   18(sp), (r1, r0)
27863d1a8abSmrg	loadd   22(sp), (r3, r2)
27963d1a8abSmrg
28063d1a8abSmrg	/* Param #2 Long Long low bit first */
28163d1a8abSmrg	loadd   26(sp), (r5, r4)
28263d1a8abSmrg	loadd   30(sp), (r7, r6)
28363d1a8abSmrg
28463d1a8abSmrg	/* Clear r13, r12 */
28563d1a8abSmrg	movd $0, (r12)
28663d1a8abSmrg	movd $0, (r13)
28763d1a8abSmrg
28863d1a8abSmrg	/* Set neg */
28963d1a8abSmrg	movw $0, r10
29063d1a8abSmrg
29163d1a8abSmrg	/* Compare if param1 is greater than 0 */
29263d1a8abSmrg	cmpw $0, r3
29363d1a8abSmrg	ble L1			/* Non-negative: skip the negation */
29463d1a8abSmrg
29563d1a8abSmrg	/* Invert param1 and neg */
29663d1a8abSmrg	movd $-1, (r9, r8) 	/* Temp set to FFFFFFFF */
29763d1a8abSmrg	xord (r9, r8), (r1, r0)	/* Xor low bits of param 1 with temp */
29863d1a8abSmrg	xord (r9, r8), (r3, r2)	/* Xor high bits of param 1 with temp */
29963d1a8abSmrg	addd $1, (r1, r0)	/* Add 1 to low bits of param 1 */
30063d1a8abSmrg	xorw $1, r10		/* Invert neg */
30163d1a8abSmrg	bcc L1			/* If no carry occurred go to L1 */
30263d1a8abSmrg	addd $1, (r3, r2)	/* Add 1 to high bits of param 1 */
30363d1a8abSmrg
30463d1a8abSmrgL1:	/* Compare if param2 is greater than 0 */
30563d1a8abSmrg	cmpw $0, r7
30663d1a8abSmrg	ble L2			/* Non-negative: skip the negation */
30763d1a8abSmrg
30863d1a8abSmrg	/* Invert param2 and neg */
30963d1a8abSmrg	movd $-1, (r9, r8)	/* Temp set to FFFFFFFF */
31063d1a8abSmrg	xord (r9, r8), (r5, r4)	/* Xor low bits of param 2 with temp */
31163d1a8abSmrg	xord (r9, r8), (r7, r6)	/* Xor high bits of param 2 with temp */
31263d1a8abSmrg	addd $1, (r5, r4)	/* Add 1 to low bits of param 2 */
31363d1a8abSmrg	xorw $1, r10		/* Invert neg */
31463d1a8abSmrg	bcc L2			/* If no carry occurred go to L2 */
31563d1a8abSmrg	addd $1, (r7, r6)	/* Add 1 to high bits of param 2 */
31663d1a8abSmrg
31763d1a8abSmrgL2:	storw r10, 18(sp)	/* Store neg to stack so we can use r10 */
31863d1a8abSmrg
	/* Naming below: param1 = A:B (high:low dwords), param2 = C:D.
	   Low 64 bits of the product = B*D + ((A*D + B*C) << 32),
	   accumulated in (r13):(r12).  */
31963d1a8abSmrg	/* B*D */
32063d1a8abSmrg	/* Bl*Dl */
32163d1a8abSmrg	macuw r0, r4, (r12)	/* Multiply r0 and r4 and add to r12 */
32263d1a8abSmrg
32363d1a8abSmrg	/* Bh*Dl */
32463d1a8abSmrg	movd $0, (r9, r8)	/* Clear r9, r8 */
32563d1a8abSmrg	macuw r1, r4, (r9, r8)	/* Multiply Bh*Dl and add result to (r9, r8) */
32663d1a8abSmrg	movw r9,  r10		/* Shift left: r9 to r10 */
32763d1a8abSmrg	lshd $16, (r9, r8)	/* Shift left: r8 to r9 */
32863d1a8abSmrg	movw $0, r11		/* Clear r11 */
32963d1a8abSmrg	addd (r9, r8), (r12)	/* Add (r9, r8) to r12 */
33063d1a8abSmrg	bcc L3			/* If no carry occurred go to L3 */
33163d1a8abSmrg	addd $1, (r13)		/* If carry occurred add 1 to r13 */
33263d1a8abSmrg
33363d1a8abSmrgL3:	addd (r11, r10), (r13)	/* Add (r11, r10) to r13 */
33463d1a8abSmrg
33563d1a8abSmrg	/* Bl*Dh */
33663d1a8abSmrg	movd $0, (r9, r8)	/* Clear (r9, r8) */
33763d1a8abSmrg	macuw r0, r5, (r9, r8)	/* Multiply r0 and r5 and store into (r9, r8) */
33863d1a8abSmrg	movw r9, r10		/* Shift left: r9 to r10 */
33963d1a8abSmrg	lshd $16, (r9, r8)	/* Shift left: r8 to r9 */
34063d1a8abSmrg	addd (r9, r8), (r12)	/* Add (r9, r8) to r12 */
34163d1a8abSmrg	bcc L4			/* If no carry occurred go to L4 */
34263d1a8abSmrg	addd $1, (r13)		/* If carry occurred add 1 to r13 */
34363d1a8abSmrg
34463d1a8abSmrgL4:	addd (r11, r10), (r13)	/* Add (r11, r10) to r13 */
34563d1a8abSmrg
34663d1a8abSmrg	/* Bh*Dh */
34763d1a8abSmrg	movd $0, (r9, r8)	/* Clear (r9, r8) */
34863d1a8abSmrg	macuw r1, r5, (r9, r8)	/* Multiply r1 and r5 and add to (r9, r8) */
34963d1a8abSmrg	addd (r9, r8), (r13)	/* Add (r9, r8) to result */
35063d1a8abSmrg
35163d1a8abSmrg	/* A*D */
35263d1a8abSmrg	/* Al*Dl */
35363d1a8abSmrg	movd $0, (r11, r10)	/* Clear (r11, r10) */
35463d1a8abSmrg	macuw r2, r4, (r11, r10)/* Multiply r2 and r4 and add to (r11, r10) */
35563d1a8abSmrg
35663d1a8abSmrg	addd (r13), (r11, r10)	/* Add r13 into (r11, r10) */
35763d1a8abSmrg
35863d1a8abSmrg	/* Al*Dh */
35963d1a8abSmrg	movd $0, (r9, r8)	/* Clear (r9, r8) */
36063d1a8abSmrg	macuw r2, r5, (r9, r8)	/* Multiply r2 and r5 and add to (r9, r8) */
36163d1a8abSmrg	addw r8, r11		/* Add r8 to r11 */
36263d1a8abSmrg
36363d1a8abSmrg	/* Ah*Dl */
36463d1a8abSmrg	muluw r3, (r5, r4)	/* Multiply r3 and r4 and store in (r5, r4) */
36563d1a8abSmrg	addw r4, r11		/* Add r4 to r11 */
36663d1a8abSmrg
36763d1a8abSmrg	/* B*C */
36863d1a8abSmrg	/* Bl*Cl */
36963d1a8abSmrg	movd $0, (r9, r8)	/* Clear (r9, r8) */
37063d1a8abSmrg	macuw r0, r6, (r9, r8)	/* Multiply r0 and r6 and add to (r9, r8) */
37163d1a8abSmrg	addd (r9, r8), (r11, r10)/* Add (r9, r8) to result */
37263d1a8abSmrg
37363d1a8abSmrg	/* Bl*Ch */
37463d1a8abSmrg	movd $0, (r9, r8)	/* Clear (r9, r8) */
37563d1a8abSmrg	macuw r0, r7, (r9, r8)	/* Multiply r0 and r7 and add to (r9, r8) */
37663d1a8abSmrg	addw r8, r11		/* Add r8 to r11 */
37763d1a8abSmrg
37863d1a8abSmrg	loadw 18(sp), r8	/* Load neg from stack */
37963d1a8abSmrg
38063d1a8abSmrg	/* Bh*Cl */
38163d1a8abSmrg	muluw r1, (r7, r6)	/* Multiply r1 and r6 and store in (r7, r6) */
38263d1a8abSmrg	addw r6, r11		/* Add r6 to r11 */
38363d1a8abSmrg
	/* Assemble the final 64-bit result */
38463d1a8abSmrgE1:	movd (r11, r10), (r3, r2)	/* High dword of result */
38563d1a8abSmrg	movd (r12), (r1, r0)	/* Low dword of result */
38663d1a8abSmrg
38763d1a8abSmrg	/* If (neg) */
38863d1a8abSmrg	cmpw $0, r8		/* Compare 0 with neg */
38963d1a8abSmrg	beq	Lexit__
39063d1a8abSmrg
39163d1a8abSmrg	/* Neg = -Neg */
39263d1a8abSmrg	movd $-1, (r9, r8) 	/* Temp set to FFFFFFFF */
39363d1a8abSmrg	xord (r9, r8), (r1, r0)	/* Xor low bits of result with temp */
39463d1a8abSmrg	xord (r9, r8), (r3, r2) /* Xor high bits of result with temp */
39563d1a8abSmrg	addd $1, (r1, r0)	/* Add 1 to low bits of result */
39663d1a8abSmrg	bcc	Lexit__
39763d1a8abSmrg	addd $1, (r3, r2)	/* Add 1 to high bit of result */
39863d1a8abSmrgLexit__:
39963d1a8abSmrg	pop	$7, r7
40063d1a8abSmrg	popret	$2, r13
40163d1a8abSmrg#endif
40263d1a8abSmrg
40363d1a8abSmrg#ifdef L_negdi2
40463d1a8abSmrg	.text
40563d1a8abSmrg	.align 4
40663d1a8abSmrg	.globl ___negdi2
40763d1a8abSmrg
/* 64-bit negate: result = -param (two's complement: invert all bits,
   add 1, propagate the carry into the high dword).
   Parameter on the stack, low dword first; result in (r3,r2):(r1,r0).  */
40863d1a8abSmrg___negdi2:
40963d1a8abSmrg	/* Load the 64-bit parameter from the stack, low dword first */
41063d1a8abSmrg	loadd 0(sp), (r1, r0)
41163d1a8abSmrg	loadd 4(sp), (r3, r2)
41263d1a8abSmrg
41363d1a8abSmrg	movd $-1, (r6, r5)	/* Set temp to FFFFFFFF */
41463d1a8abSmrg	xord (r6, r5), (r1, r0)	/* Xor low bits with temp */
41563d1a8abSmrg	xord (r6, r5), (r3, r2)	/* Xor high bits with temp */
41663d1a8abSmrg	addd $1, (r1, r0)	/* Add one */
41763d1a8abSmrg	jcc (ra)		/* Return early if the +1 did not carry */
41863d1a8abSmrg	addd $1, (r3, r2)	/* Add the carry to the high bits */
41963d1a8abSmrg	jump (ra)
42063d1a8abSmrg#endif
42163d1a8abSmrg
42263d1a8abSmrg#ifdef L_udivdi3
42363d1a8abSmrg	.text
42463d1a8abSmrg	.align 4
42563d1a8abSmrg	.globl ___udivdi3
42663d1a8abSmrg
/* Unsigned 64-bit divide.  Tail-branches to the shared worker with
   r2 = 0, which makes ___udivmoddi3 return the quotient; the stack
   arguments are left in place for the worker.  */
42763d1a8abSmrg___udivdi3:
42863d1a8abSmrg	movw $0, r2		/* Mode 0: quotient */
42963d1a8abSmrg	br ___udivmoddi3
43063d1a8abSmrg#endif
43163d1a8abSmrg
43263d1a8abSmrg#ifdef L_udivmoddi3
43363d1a8abSmrg	.text
43463d1a8abSmrg	.align 4
43563d1a8abSmrg	.globl ___udivmoddi3
43663d1a8abSmrg
/* Unsigned 64-bit divide/modulo worker.
   Stack: numerator then denominator, each low dword first.  r2 is a
   selector saved at 18(sp): 0 -> return the quotient, non-zero ->
   return the remainder; either way the result ends in
   (r3,r2):(r1,r0).
   Algorithm: classic shift-and-subtract.  The denominator ("den",
   (r7,r6):(r5,r4)) and a power-of-two mask ("bit", (r11,r10):(r9,r8),
   starting at 1) are shifted left while den < num; then both are
   shifted back right, and whenever den <= num, den is subtracted
   from the numerator and "bit" is OR-ed into the quotient
   accumulator (r13):(r12).  What remains of the numerator is the
   remainder.  A zero denominator raises the divide-by-zero trap.  */
43763d1a8abSmrg___udivmoddi3:
43863d1a8abSmrg	push	$2, r13
43963d1a8abSmrg	push	$7, r7
44063d1a8abSmrg
44163d1a8abSmrg	/* Param #1 Long Long low bit first */
44263d1a8abSmrg	loadd	18(sp), (r1, r0)
44363d1a8abSmrg	storw	r2, 18(sp)	/* Save the quotient/remainder selector */
44463d1a8abSmrg	loadd	22(sp), (r3, r2)
44563d1a8abSmrg
44663d1a8abSmrg	/* Param #2 Long Long low bit first */
44763d1a8abSmrg	loadd	26(sp), (r5, r4)
44863d1a8abSmrg	loadd	30(sp), (r7, r6)
44963d1a8abSmrg
45063d1a8abSmrg	/* Set ures to 0 */
45163d1a8abSmrg	movd $0, (r13)
45263d1a8abSmrg	movd $0, (r12)
45363d1a8abSmrg
	/* Check for a zero denominator (low dword first) */
45463d1a8abSmrg	cmpd (r12), (r5, r4)
45563d1a8abSmrg	beq LE
45663d1a8abSmrg
45763d1a8abSmrgL5:	movd $1, (r9, r8)	/* Store 1 in low bits from bit */
45863d1a8abSmrg	movd $0, (r11, r10)	/* Store 0 in high bits from bit */
45963d1a8abSmrg
46063d1a8abSmrgL6:	/* While (den < num && (!den & (1LL<<63))) */
46163d1a8abSmrg	/* Compare high bits from param 1 and param 2 */
46263d1a8abSmrg	cmpd (r7, r6), (r3, r2)
46363d1a8abSmrg	bhi L10			/* If param 2 is greater go to L10 */
46463d1a8abSmrg	bne L8			/* If param 1 is greater go to L8 */
46563d1a8abSmrg	cmpd (r5, r4), (r1, r0)	/* Compare low bits from param 1 and param 2 */
46663d1a8abSmrg	/*  If param 2 is greater or the same go to L10 */
46763d1a8abSmrg	bhs L10
46863d1a8abSmrg
46963d1a8abSmrgL8:	/* Check if most significant bit of param 2 is set */
47063d1a8abSmrg	tbit $15, r7
47163d1a8abSmrg	bfs L10			/* If PSR is set go to L10 */
47263d1a8abSmrg
47363d1a8abSmrg	/* Shift bit */
47463d1a8abSmrg	lshd $1, (r11, r10)	/* Shift left: high bits of bit */
47563d1a8abSmrg	/* Check if most significant bit of bit is set */
47663d1a8abSmrg	tbit $15, r9
47763d1a8abSmrg	lshd $1, (r9, r8)	/* Shift left: low bits of bit */
47863d1a8abSmrg	bfs L28			/* If PSR is set go to L28 */
47963d1a8abSmrg
48063d1a8abSmrgL9:	/* Shift b */
48163d1a8abSmrg	lshd $1, (r7, r6)	/* Shift left: high bits of param 2 */
48263d1a8abSmrg	/* Check if most significant bit of param 2 is set */
48363d1a8abSmrg	tbit $15, r5
48463d1a8abSmrg	lshd $1, (r5, r4)	/* Shift left: low bits of param 2 */
48563d1a8abSmrg	bfc L6			/* If the tested bit was clear go to L6 */
48663d1a8abSmrg	addw $1, r6		/* Propagate the shifted-out bit into the high dword */
48763d1a8abSmrg	br L6			/* Go to L6 */
48863d1a8abSmrg
48963d1a8abSmrgL10:	/* While (bit) */
49063d1a8abSmrg	cmpd $0, (r11, r10)
49163d1a8abSmrg	bne L11
49263d1a8abSmrg	cmpd $0, (r9, r8)
49363d1a8abSmrg	beq E1			/* bit == 0: division finished */
49463d1a8abSmrg
49563d1a8abSmrgL11:	/* If (num >= den) */
49663d1a8abSmrg	cmpd (r3, r2), (r7, r6)	/* Compare high bits of param 1 and param 2 */
49763d1a8abSmrg	blo L15			/* If param 1 lower than param 2 go to L15 */
49863d1a8abSmrg	bne L12			/* If not equal go to L12 */
49963d1a8abSmrg	cmpd (r1, r0), (r5, r4)	/* Compare low bits of param 1 and param 2 */
50063d1a8abSmrg	blo L15			/* If param 1 lower than param 2 go to L15 */
50163d1a8abSmrg
50263d1a8abSmrgL12:	/* Ures |= bit */
50363d1a8abSmrg	ord (r11, r10), (r13)
50463d1a8abSmrg	ord (r9, r8), (r12)
50563d1a8abSmrg
50663d1a8abSmrg	/* Num -= den */
50763d1a8abSmrg	subd (r7, r6), (r3, r2) /* Subtract highest 32 bits from each other */
50863d1a8abSmrg	subd (r5, r4), (r1, r0)	/* Subtract lowest 32 bits from each other */
50963d1a8abSmrg	bcc L15			/* If no carry occurred go to L15 */
51063d1a8abSmrg	subd $1, (r3, r2)	/* Subtract the carry */
51163d1a8abSmrg
51263d1a8abSmrgL15:	/* Shift bit to the right */
51363d1a8abSmrg	lshd $-1, (r9, r8)	/* Shift right: low bits of bit */
51463d1a8abSmrg	/* Check if least significant bit of high bits is set */
51563d1a8abSmrg	tbit $0, r10
51663d1a8abSmrg	lshd $-1, (r11, r10)	/* Shift right: high bits of bit */
51763d1a8abSmrg	bfs L18			/* If PSR is set go to L18 */
51863d1a8abSmrg
51963d1a8abSmrgL17:	/* Shift param#2 to the right */
52063d1a8abSmrg	lshd $-1, (r5, r4)	/* Shift right: low bits of param 2 */
52163d1a8abSmrg	/* Check if least significant bit of high bits is set */
52263d1a8abSmrg	tbit $0, r6
52363d1a8abSmrg	lshd $-1, (r7, r6)	/* Shift right: high bits of param 2 */
52463d1a8abSmrg	bfc L10			/* If PSR is not set go to L10 */
52563d1a8abSmrg	/* Or with 0x8000 to set most significant bit */
52663d1a8abSmrg	orw $32768, r5
52763d1a8abSmrg	br L10			/* Go to L10 */
52863d1a8abSmrg
52963d1a8abSmrgL18:	/* Or with 0x8000 to set most significant bit */
53063d1a8abSmrg	orw $32768, r9
53163d1a8abSmrg	br L17
53263d1a8abSmrg
53363d1a8abSmrgL28: 	/* Left shift bit */
53463d1a8abSmrg	addw $1, r10		/* Propagate the shifted-out bit into the high dword */
53563d1a8abSmrg	br L9			/* Go to L9 */
53663d1a8abSmrg
	/* Low dword of den is zero: check the high dword too */
53763d1a8abSmrgLE:	cmpd (r12), (r7, r6)
53863d1a8abSmrg	bne L5			/* Non-zero denominator: divide normally */
53963d1a8abSmrg	excp dvz		/* Denominator is zero: raise the divide-by-zero trap */
54063d1a8abSmrg	br	Lexit__
54163d1a8abSmrg
	/* Done: select quotient or remainder per the saved flag */
54263d1a8abSmrgE1:	loadw	18(sp), r4
54363d1a8abSmrg	cmpw $0, r4
54463d1a8abSmrg	bne	Lexit__		/* Non-zero: leave the remainder in (r3,r2):(r1,r0) */
54563d1a8abSmrg
54663d1a8abSmrg	/* Return result */
54763d1a8abSmrg	movd (r12), (r1, r0)	/* Quotient low dword */
54863d1a8abSmrg	movd (r13), (r3, r2)	/* Quotient high dword */
54963d1a8abSmrgLexit__:
55063d1a8abSmrg	pop	$7, r7
55163d1a8abSmrg	popret	$2, r13
55263d1a8abSmrg#endif
55363d1a8abSmrg
55463d1a8abSmrg#ifdef L_umoddi3
55563d1a8abSmrg	.text
55663d1a8abSmrg	.align 4
55763d1a8abSmrg	.globl ___umoddi3
55863d1a8abSmrg
/* Unsigned 64-bit modulo.  Tail-branches to the shared worker with
   r2 = 1, which makes ___udivmoddi3 return the remainder; the stack
   arguments are left in place for the worker.  */
55963d1a8abSmrg___umoddi3:
56063d1a8abSmrg	movw $1, r2		/* Mode 1: remainder */
56163d1a8abSmrg	br ___udivmoddi3
56263d1a8abSmrg#endif
56363d1a8abSmrg
564