;	$OpenBSD: milli.S,v 1.6 2022/01/11 09:21:35 jsg Exp $
;
;  (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
;  To anyone who acknowledges that this file is provided "AS IS"
;  without any express or implied warranty:
;      permission to use, copy, modify, and distribute this file
;  for any purpose is hereby granted without fee, provided that
;  the above copyright notice and this notice appears in all
;  copies, and that the name of Hewlett-Packard Company not be
;  used in advertising or publicity pertaining to distribution
;  of the software without specific, written prior permission.
;  Hewlett-Packard Company makes no representations about the
;  suitability of this software for any purpose.
;

; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
;	- fr16-31 added at Utah
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ	0

r1: .equ	1

r2: .equ	2

r3: .equ	3

r4: .equ	4

r5: .equ	5

r6: .equ	6

r7: .equ	7

r8: .equ	8

r9: .equ	9

r10: .equ	10

r11: .equ	11

r12: .equ	12

r13: .equ	13

r14: .equ	14

r15: .equ	15

r16: .equ	16

r17: .equ	17

r18: .equ	18

r19: .equ	19

r20: .equ	20

r21: .equ	21

r22: .equ	22

r23: .equ	23

r24: .equ	24

r25: .equ	25

r26: .equ	26

r27: .equ	27

r28: .equ	28

r29: .equ	29

r30: .equ	30

r31: .equ	31

; Hardware Space Registers
sr0: .equ	0

sr1: .equ	1

sr2: .equ	2

sr3: .equ	3

sr4: .equ	4

sr5: .equ	5

sr6: .equ	6

sr7: .equ	7

; Hardware Floating Point Registers
fr0: .equ	0

fr1: .equ	1

fr2: .equ	2

fr3: .equ	3

fr4: .equ	4

fr5: .equ	5

fr6: .equ	6

fr7: .equ	7

fr8: .equ	8

fr9: .equ	9

fr10: .equ	10

fr11: .equ	11

fr12: .equ	12

fr13: .equ	13

fr14: .equ	14

fr15: .equ	15

fr16: .equ	16

fr17: .equ	17

fr18: .equ	18

fr19: .equ	19

fr20: .equ	20

fr21: .equ	21

fr22: .equ	22

fr23: .equ	23

fr24: .equ	24

fr25: .equ	25

fr26: .equ	26

fr27: .equ	27

fr28: .equ	28

fr29: .equ	29

fr30: .equ	30

fr31: .equ	31

; Hardware Control Registers
cr0: .equ	0

rctr: .equ	0			; Recovery Counter Register

cr8: .equ	8			; Protection ID 1

pidr1: .equ	8

cr9: .equ	9			; Protection ID 2

pidr2: .equ	9

cr10: .equ	10

ccr: .equ	10			; Coprocessor Configuration Register

cr11: .equ	11

sar: .equ	11			; Shift Amount Register

cr12: .equ	12

pidr3: .equ	12			; Protection ID 3

cr13: .equ	13

pidr4: .equ	13			; Protection ID 4

cr14: .equ	14

iva: .equ	14			; Interrupt Vector Address

cr15: .equ	15

eiem: .equ	15			; External Interrupt Enable Mask

cr16: .equ	16

itmr: .equ	16			; Interval Timer

cr17: .equ	17

pcsq: .equ	17			; Program Counter Space queue

cr18: .equ	18

pcoq: .equ	18			; Program Counter Offset queue

cr19: .equ	19

iir: .equ	19			; Interruption Instruction Register

cr20: .equ	20

isr: .equ	20			; Interruption Space Register

cr21: .equ	21

ior: .equ	21			; Interruption Offset Register

cr22: .equ	22

ipsw: .equ	22			; Interruption Processor Status Word

cr23: .equ	23

eirr: .equ	23			; External Interrupt Request

cr24: .equ	24

ppda: .equ	24			; Physical Page Directory Address

tr0: .equ	24			; Temporary register 0

cr25: .equ	25

hta: .equ	25			; Hash Table Address

tr1: .equ	25			; Temporary register 1

cr26: .equ	26

tr2: .equ	26			; Temporary register 2

cr27: .equ	27

tr3: .equ	27			; Temporary register 3

cr28: .equ	28

tr4: .equ	28			; Temporary register 4

cr29: .equ	29

tr5: .equ	29			; Temporary register 5

cr30: .equ	30

tr6: .equ	30			; Temporary register 6

cr31: .equ	31

tr7: .equ	31			; Temporary register 7

;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention						~
; Register Definitions for Use with Assembler				~
; version A.08.06							~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ	r2	; return pointer

mrp: .equ	r31	; millicode return pointer

ret0: .equ	r28	; return value

ret1: .equ	r29	; return value (high part of double)

sl: .equ	r29	; static link

sp: .equ	r30	; stack pointer

dp: .equ	r27	; data pointer

arg0: .equ	r26	; argument

arg1: .equ	r25	; argument or high part of double argument

arg2: .equ	r24	; argument

arg3: .equ	r23	; argument or high part of double argument

;_____________________________________________________________________________
; Software Architecture Space Registers
;		sr0	; return link from BLE
sret: .equ	sr1	; return value

sarg: .equ	sr1	; argument

;		sr4	; PC SPACE tracker
;		sr5	; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ	64	; old stack pointer (locates previous frame)

;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions.  version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;

	.text
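; $$remI: 32-bit signed remainder.  A summary of the convention, inferred
; from the register aliases above rather than stated in this file:
; dividend in arg0 (r26), divisor in arg1 (r25), remainder returned in
; ret1 (r29) with the sign of the dividend, return address in r31 (mrp).
; The addit,= at entry traps on a zero divisor.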
	.EXPORT $$remI,millicode
;	.IMPORT	cerror
$$remI:
	.PROC
	.CALLINFO NO_CALLS
	.ENTRY
	addit,= 0,arg1,r0
	add,>= r0,arg0,ret1
	sub r0,ret1,ret1
	sub r0,arg1,r1
	ds r0,r1,r0
	or r0,r0,r1
	add ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	movb,>=,n r1,ret1,remI300
	add,< arg1,r0,r0
	add,tr r1,arg1,ret1
	sub r1,arg1,ret1
remI300: add,>= arg0,r0,r0

	sub r0,ret1,ret1
	bv r0(r31)
	nop
	.EXIT
	.PROCEND
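; The 32 ds/addc pairs above retire one quotient bit per step using the
; PA-RISC divide-step instruction.  As a rough model only (an assumption
; for illustration: this C loop is restoring division, while ds itself
; is nonrestoring), the core computes the same quotient and remainder as:
;
;	unsigned div_step_model(unsigned n, unsigned d, unsigned *rem)
;	{
;		unsigned q = 0, r = 0;
;		int i;
;
;		for (i = 31; i >= 0; i--) {
;			r = (r << 1) | ((n >> i) & 1);	/* shift in next bit */
;			q <<= 1;
;			if (r >= d) {			/* trial subtract */
;				r -= d;
;				q |= 1;
;			}
;		}
;		*rem = r;
;		return q;
;	}
;
; $$remI wraps this core with sign handling: a negative dividend is
; negated on entry and the remainder is negated back at remI300.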

bit1:  .equ 1

bit30: .equ 30
bit31: .equ 31

len2:  .equ 2

len4:  .equ 4

#if 0
$$dyncall:
	.proc
	.callinfo NO_CALLS
	.export $$dyncall,MILLICODE

	bb,>=,n	22,bit30,noshlibs

	depi	0,bit31,len2,22
	ldw	4(22),19
	ldw	0(22),22
noshlibs:
	ldsid	(22),r1
	mtsp	r1,sr0
	be	0(sr0,r22)
	stw	rp,-24(sp)
	.procend
#endif

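; $$sh_func_adrs appears to recover the underlying entry point from a
; function pointer that may be a shared-library plabel (this summary is
; inferred from the instruction sequence, not documented here): copy the
; pointer to ret1, clear the plabel bit, probe the resulting address,
; and when the probe succeeds load the descriptor word over ret1 before
; returning.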
$$sh_func_adrs:
	.proc
	.callinfo NO_CALLS
	.export $$sh_func_adrs, millicode
	ldo	0(r26),ret1
	dep	r0,30,1,r26
	probew	(r26),r31,r22
	extru,=	r22,31,1,r22
	bv	r0(r31)
	ldws	0(r26),ret1
	.procend

temp: .EQU	r1

retreg: .EQU	ret1	; r29

	.export $$divU,millicode
	.import $$divU_3,millicode
	.import $$divU_5,millicode
	.import $$divU_6,millicode
	.import $$divU_7,millicode
	.import $$divU_9,millicode
	.import $$divU_10,millicode
	.import $$divU_12,millicode
	.import $$divU_14,millicode
	.import $$divU_15,millicode
$$divU:
	.proc
	.callinfo NO_CALLS
; The subtract is not nullified since it does no harm and can be used
; by the two cases that branch back to "normal".
	comib,>=  15,arg1,special_divisor
	sub	r0,arg1,temp		; clear carry, negate the divisor
	ds	r0,temp,r0		; set V-bit to 1
normal:
	add	arg0,arg0,retreg	; shift msb bit into carry
	ds	r0,arg1,temp		; 1st divide step, if no carry
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 2nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 3rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 4th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 5th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 6th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 7th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 8th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 9th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 10th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 11th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 12th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 13th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 14th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 15th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 16th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 17th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 18th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 19th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 20th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 21st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 22nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 23rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 24th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 25th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 26th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 27th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 28th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 29th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 30th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 31st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 32nd divide step,
	bv	0(r31)
	addc	retreg,retreg,retreg	; shift last retreg bit into retreg
;_____________________________________________________________________________
; handle the cases where divisor is a small constant or has high bit on
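; The blr below branches to (. + 8 + arg1*8) without linking, so each
; divisor value 0..15 gets exactly one two-instruction (8-byte) slot in
; the table that follows.  The comib in the blr's delay slot diverts
; divisors with the high bit set (negative when viewed as signed) to
; big_divisor instead.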
special_divisor:
	blr	arg1,r0
	comib,>,n  0,arg1,big_divisor	; nullify previous instruction
zero_divisor: ; this label is here to provide external visibility

	addit,=	0,arg1,0		; trap for zero dvr
	nop
	bv	0(r31)			; divisor == 1
	copy	arg0,retreg
	bv	0(r31)			; divisor == 2
	extru	arg0,30,31,retreg
	 b,n	$$divU_3		; divisor == 3
	nop
	bv	0(r31)			; divisor == 4
	extru	arg0,29,30,retreg
	 b,n	$$divU_5		; divisor == 5
	nop
	 b,n	$$divU_6		; divisor == 6
	nop
	 b,n	$$divU_7		; divisor == 7
	nop
	bv	0(r31)			; divisor == 8
	extru	arg0,28,29,retreg
	 b,n	$$divU_9		; divisor == 9
	nop
	 b,n	$$divU_10		; divisor == 10
	nop
	b	normal			; divisor == 11
	ds	r0,temp,r0		; set V-bit to 1
	 b,n	$$divU_12		; divisor == 12
	nop
	b	normal			; divisor == 13
	ds	r0,temp,r0		; set V-bit to 1
	 b,n	$$divU_14		; divisor == 14
	nop
	 b,n	$$divU_15		; divisor == 15
	nop
;_____________________________________________________________________________
; Handle the case where the high bit is on in the divisor.
; Compute:	if( dividend>=divisor) quotient=1; else quotient=0;
; Note:		dividend>=divisor iff dividend-divisor does not borrow
; and		not borrow iff carry
big_divisor:
	sub	arg0,arg1,r0
	bv	0(r31)
	addc	r0,r0,retreg
	.procend
	.end

t2: .EQU	r1

; x2	.EQU	arg0	; r26
t1: .EQU	arg1	; r25

; x1	.EQU	ret1	; r29
;_____________________________________________________________________________

$$divide_by_constant:
	.PROC
	.CALLINFO NO_CALLS
	.export $$divide_by_constant,millicode
; Provides a "nice" label for the code covered by the unwind descriptor
; for things like gprof.

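; The power-of-two entries below do round-toward-zero signed division:
; COMCLR,>= nullifies the ADDI for non-negative dividends, so only
; negative values get the 2^k-1 bias before the arithmetic-shift EXTRS.
; A C model (an illustrative assumption, not part of this file):
;
;	int divI_pow2_model(int n, int k)
;	{
;		if (n < 0)
;			n += (1 << k) - 1;	/* bias so shift truncates toward 0 */
;		return n >> k;			/* arithmetic shift, as EXTRS does */
;	}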
$$divI_2:
	.EXPORT	$$divI_2,MILLICODE
	COMCLR,>=	arg0,0,0
	ADDI		1,arg0,arg0
	bv		0(r31)
	EXTRS		arg0,30,31,ret1

$$divI_4:
	.EXPORT		$$divI_4,MILLICODE
	COMCLR,>=	arg0,0,0
	ADDI		3,arg0,arg0
	bv		0(r31)
	EXTRS		arg0,29,30,ret1

$$divI_8:
	.EXPORT		$$divI_8,MILLICODE
	COMCLR,>=	arg0,0,0
	ADDI		7,arg0,arg0
	bv		0(r31)
	EXTRS		arg0,28,29,ret1

$$divI_16:
	.EXPORT		$$divI_16,MILLICODE
	COMCLR,>=	arg0,0,0
	ADDI		15,arg0,arg0
	bv		0(r31)
	EXTRS		arg0,27,28,ret1

$$divI_3:
	.EXPORT		$$divI_3,MILLICODE
	COMB,<,N	arg0,0,$neg3

	ADDI		1,arg0,arg0
	EXTRU		arg0,1,2,ret1
	SH2ADD		arg0,arg0,arg0
	B		$pos
	ADDC		ret1,0,ret1

$neg3:
	SUBI		1,arg0,arg0
	EXTRU		arg0,1,2,ret1
	SH2ADD		arg0,arg0,arg0
	B		$neg
	ADDC		ret1,0,ret1

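; The remaining constant divisors are reciprocal multiplies built from
; shift-and-adds, with the 64-bit running product kept in ret1:arg0 and
; folded by the SHD/ADD/ADDC ladders at $pos and $neg.  For example,
; $$divU_3 multiplies by 5, then by 17, 257 and 65537 -- 0x55555555 in
; all -- and returns the high word.  A C model (an assumption for
; illustration, not part of this file):
;
;	unsigned divU3_model(unsigned x)
;	{
;		unsigned long long p =
;		    ((unsigned long long)x + 1) * 0x55555555ULL;
;		return (unsigned)(p >> 32);	/* == x / 3 for all 32-bit x */
;	}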
$$divU_3:
	.EXPORT		$$divU_3,MILLICODE
	ADDI		1,arg0,arg0
	ADDC		0,0,ret1
	SHD		ret1,arg0,30,t1
	SH2ADD		arg0,arg0,arg0
	B		$pos
	ADDC		ret1,t1,ret1

$$divI_5:
	.EXPORT		$$divI_5,MILLICODE
	COMB,<,N	arg0,0,$neg5
	ADDI		3,arg0,t1
	SH1ADD		arg0,t1,arg0
	B		$pos
	ADDC		0,0,ret1

$neg5:
	SUB		0,arg0,arg0
	ADDI		1,arg0,arg0
	SHD		0,arg0,31,ret1
	SH1ADD		arg0,arg0,arg0
	B		$neg
	ADDC		ret1,0,ret1

$$divU_5:
	.EXPORT		$$divU_5,MILLICODE
	ADDI		1,arg0,arg0
	ADDC		0,0,ret1
	SHD		ret1,arg0,31,t1
	SH1ADD		arg0,arg0,arg0
	B		$pos
	ADDC		t1,ret1,ret1

$$divI_6:
	.EXPORT		$$divI_6,MILLICODE
	COMB,<,N	arg0,0,$neg6
	EXTRU		arg0,30,31,arg0
	ADDI		5,arg0,t1
	SH2ADD		arg0,t1,arg0
	B		$pos
	ADDC		0,0,ret1

$neg6:
	SUBI		2,arg0,arg0
	EXTRU		arg0,30,31,arg0
	SHD		0,arg0,30,ret1
	SH2ADD		arg0,arg0,arg0
	B		$neg
	ADDC		ret1,0,ret1

$$divU_6:
	.EXPORT		$$divU_6,MILLICODE
	EXTRU		arg0,30,31,arg0
	ADDI		1,arg0,arg0
	SHD		0,arg0,30,ret1
	SH2ADD		arg0,arg0,arg0
	B		$pos
	ADDC		ret1,0,ret1

$$divU_10:
	.EXPORT		$$divU_10,MILLICODE
	EXTRU		arg0,30,31,arg0
	ADDI		3,arg0,t1
	SH1ADD		arg0,t1,arg0
	ADDC		0,0,ret1
$pos:
	SHD		ret1,arg0,28,t1
	SHD		arg0,0,28,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,ret1
$pos_for_17:
	SHD		ret1,arg0,24,t1
	SHD		arg0,0,24,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,ret1

	SHD		ret1,arg0,16,t1
	SHD		arg0,0,16,t2
	ADD		arg0,t2,arg0
	bv		0(r31)
	ADDC		ret1,t1,ret1

$$divI_10:
	.EXPORT		$$divI_10,MILLICODE
	COMB,<		arg0,0,$neg10
	COPY		0,ret1
	EXTRU		arg0,30,31,arg0
	ADDIB,TR	1,arg0,$pos
	SH1ADD		arg0,arg0,arg0

$neg10:
	SUBI		2,arg0,arg0
	EXTRU		arg0,30,31,arg0
	SH1ADD		arg0,arg0,arg0
$neg:
	SHD		ret1,arg0,28,t1
	SHD		arg0,0,28,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,ret1
$neg_for_17:
	SHD		ret1,arg0,24,t1
	SHD		arg0,0,24,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,ret1

	SHD		ret1,arg0,16,t1
	SHD		arg0,0,16,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,ret1
	bv		0(r31)
	SUB		0,ret1,ret1

$$divI_12:
	.EXPORT		$$divI_12,MILLICODE
	COMB,<		arg0,0,$neg12
	COPY		0,ret1
	EXTRU		arg0,29,30,arg0
	ADDIB,TR	1,arg0,$pos
	SH2ADD		arg0,arg0,arg0

$neg12:
	SUBI		4,arg0,arg0
	EXTRU		arg0,29,30,arg0
	B		$neg
	SH2ADD		arg0,arg0,arg0

$$divU_12:
	.EXPORT		$$divU_12,MILLICODE
	EXTRU		arg0,29,30,arg0
	ADDI		5,arg0,t1
	SH2ADD		arg0,t1,arg0
	B		$pos
	ADDC		0,0,ret1

$$divI_15:
	.EXPORT		$$divI_15,MILLICODE
	COMB,<		arg0,0,$neg15
	COPY		0,ret1
	ADDIB,TR	1,arg0,$pos+4
	SHD		ret1,arg0,28,t1

$neg15:
	B		$neg
	SUBI		1,arg0,arg0

$$divU_15:
	.EXPORT		$$divU_15,MILLICODE
	ADDI		1,arg0,arg0
	B		$pos
	ADDC		0,0,ret1

$$divI_17:
	.EXPORT		$$divI_17,MILLICODE
	COMB,<,N	arg0,0,$neg17
	ADDI		1,arg0,arg0
	SHD		0,arg0,28,t1
	SHD		arg0,0,28,t2
	SUB		t2,arg0,arg0
	B		$pos_for_17
	SUBB		t1,0,ret1

$neg17:
	SUBI		1,arg0,arg0
	SHD		0,arg0,28,t1
	SHD		arg0,0,28,t2
	SUB		t2,arg0,arg0
	B		$neg_for_17
	SUBB		t1,0,ret1

$$divU_17:
	.EXPORT		$$divU_17,MILLICODE
	ADDI		1,arg0,arg0
	ADDC		0,0,ret1
	SHD		ret1,arg0,28,t1
$u17:
	SHD		arg0,0,28,t2
	SUB		t2,arg0,arg0
	B		$pos_for_17
	SUBB		t1,ret1,ret1

$$divI_7:
	.EXPORT		$$divI_7,MILLICODE
	COMB,<,N	arg0,0,$neg7
$7:
	ADDI		1,arg0,arg0
	SHD		0,arg0,29,ret1
	SH3ADD		arg0,arg0,arg0
	ADDC		ret1,0,ret1
$pos7:
	SHD		ret1,arg0,26,t1
	SHD		arg0,0,26,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,ret1

	SHD		ret1,arg0,20,t1
	SHD		arg0,0,20,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,t1

	COPY		0,ret1
	SHD,=		t1,arg0,24,t1
$1:
	ADDB,TR		t1,ret1,$2
	EXTRU		arg0,31,24,arg0

	bv,n  0(r31)

$2:
	ADDB,TR		t1,arg0,$1
	EXTRU,=		arg0,7,8,t1

$neg7:
	SUBI		1,arg0,arg0
$8:
	SHD		0,arg0,29,ret1
	SH3ADD		arg0,arg0,arg0
	ADDC		ret1,0,ret1

$neg7_shift:
	SHD		ret1,arg0,26,t1
	SHD		arg0,0,26,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,ret1

	SHD		ret1,arg0,20,t1
	SHD		arg0,0,20,t2
	ADD		arg0,t2,arg0
	ADDC		ret1,t1,t1

	COPY		0,ret1
	SHD,=		t1,arg0,24,t1
$3:
	ADDB,TR		t1,ret1,$4
	EXTRU		arg0,31,24,arg0

	bv		0(r31)
	SUB		0,ret1,ret1

$4:
	ADDB,TR		t1,arg0,$3
	EXTRU,=		arg0,7,8,t1

$$divU_7:
	.EXPORT		$$divU_7,MILLICODE
	ADDI		1,arg0,arg0
	ADDC		0,0,ret1
	SHD		ret1,arg0,29,t1
	SH3ADD		arg0,arg0,arg0
	B		$pos7
	ADDC		t1,ret1,ret1

$$divI_9:
	.EXPORT		$$divI_9,MILLICODE
	COMB,<,N	arg0,0,$neg9
	ADDI		1,arg0,arg0
	SHD		0,arg0,29,t1
	SHD		arg0,0,29,t2
	SUB		t2,arg0,arg0
	B		$pos7
	SUBB		t1,0,ret1

$neg9:
	SUBI		1,arg0,arg0
	SHD		0,arg0,29,t1
	SHD		arg0,0,29,t2
	SUB		t2,arg0,arg0
	B		$neg7_shift
	SUBB		t1,0,ret1

$$divU_9:
	.EXPORT		$$divU_9,MILLICODE
	ADDI		1,arg0,arg0
	ADDC		0,0,ret1
	SHD		ret1,arg0,29,t1
	SHD		arg0,0,29,t2
	SUB		t2,arg0,arg0
	B		$pos7
	SUBB		t1,ret1,ret1

$$divI_14:
	.EXPORT		$$divI_14,MILLICODE
	COMB,<,N	arg0,0,$neg14
$$divU_14:
	.EXPORT		$$divU_14,MILLICODE
	B		$7
	EXTRU		arg0,30,31,arg0

$neg14:
	SUBI		2,arg0,arg0
	B		$8
	EXTRU		arg0,30,31,arg0

	.PROCEND
	.END

rmndr: .EQU	ret1	; r29

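; $$remU: 32-bit unsigned remainder, same millicode convention as $$remI
; (again inferred from the aliases above): dividend in arg0, divisor in
; arg1, remainder in ret1, return through r31.  A zero divisor, or one
; with the high bit set, takes the special_case exit below.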
	.export $$remU,millicode
$$remU:
	.proc
	.callinfo NO_CALLS
	.entry

	comib,>=,n  0,arg1,special_case
	sub	r0,arg1,rmndr		; clear carry, negate the divisor
	ds	r0,rmndr,r0		; set V-bit to 1
	add	arg0,arg0,temp		; shift msb bit into carry
	ds	r0,arg1,rmndr		; 1st divide step, if no carry
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 2nd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 3rd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 4th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 5th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 6th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 7th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 8th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 9th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 10th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 11th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 12th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 13th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 14th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 15th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 16th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 17th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 18th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 19th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 20th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 21st divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 22nd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 23rd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 24th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 25th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 26th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 27th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 28th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 29th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 30th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 31st divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr		; 32nd divide step,
	comiclr,<= 0,rmndr,r0
	  add	rmndr,arg1,rmndr	; correction
;	.exit
	bv,n  0(r31)
	nop
; Putting >= on the last DS and deleting COMICLR does not work!
;_____________________________________________________________________________
special_case:
	addit,=	0,arg1,r0		; trap on div by zero
	sub,>>=	arg0,arg1,rmndr
	  copy	arg0,rmndr
	.exit
	bv,n  0(r31)
	nop
	.procend
	.end

; Use bv  0(r31) and bv,n  0(r31) instead.
; #define	return		bv	0(%mrp)
; #define	return_n	bv,n	0(%mrp)

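; $$mulI multiplies arg0 (%r26) by the constant in %r25 eight multiplier
; bits at a time: blr dispatches into a table of 16-byte cases (x0..x255
; below), each of which adds that chunk's shift-and-add partial product
; into %r29 and loops through l1/l2 until the remaining multiplier bits
; are zero.  A rough C model of the unsigned loop (an illustrative
; assumption; the negate/swap code below folds the signs in):
;
;	unsigned mulI_model(unsigned x, unsigned y)
;	{
;		unsigned acc = 0;
;
;		while (y != 0) {
;			acc += x * (y & 0xff);	/* one xN table case */
;			x <<= 8;		/* l1: zdep %r26,23,24,%r26 */
;			y >>= 8;		/* extru %r25,23,24,%r25 */
;		}
;		return acc;
;	}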
	.align 16
$$mulI:

	.proc
	.callinfo NO_CALLS
	.export $$mulI, millicode
	combt,<<=	%r25,%r26,l4	; swap args if unsigned %r25>%r26
	copy		0,%r29		; zero out the result
	xor		%r26,%r25,%r26	; swap %r26 & %r25 using the
	xor		%r26,%r25,%r25	;  old xor trick
	xor		%r26,%r25,%r26
l4: combt,<=	0,%r26,l3		; if %r26>=0 then proceed like unsigned

	zdep		%r25,30,8,%r1	; %r1 = (%r25&0xff)<<1 *********
	sub,>		0,%r25,%r1		; otherwise negate both and
	combt,<=,n	%r26,%r1,l2	;  swap back if |%r26|<|%r25|
	sub		0,%r26,%r25
	movb,tr,n	%r1,%r26,l2	; 10th inst.

l0:	add	%r29,%r1,%r29				; add in this partial product

l1: zdep	%r26,23,24,%r26			; %r26 <<= 8 ******************

l2: zdep		%r25,30,8,%r1	; %r1 = (%r25&0xff)<<1 *********

l3: blr		%r1,0		; case on these 8 bits ******

	extru		%r25,23,24,%r25	; %r25 >>= 8 ******************

;16 insts before this.
;			  %r26 <<= 8 **************************
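; Each xN line below packs four instructions separated by "!", the HP
; assembler's statement separator, so every case occupies exactly 16
; bytes.  That matches the blr index: zdep leaves the multiplier byte
; shifted left one bit, and blr scales it by another factor of 8.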
x0: comb,<>	%r25,0,l2	! zdep	%r26,23,24,%r26	! bv,n  0(r31)	! nop

x1: comb,<>	%r25,0,l1	!	add	%r29,%r26,%r29	! bv,n  0(r31)	! nop

x2: comb,<>	%r25,0,l1	! sh1add	%r26,%r29,%r29	! bv,n  0(r31)	! nop

x3: comb,<>	%r25,0,l0	!	sh1add	%r26,%r26,%r1	! bv	0(r31)	!	add	%r29,%r1,%r29

x4: comb,<>	%r25,0,l1	! sh2add	%r26,%r29,%r29	! bv,n  0(r31)	! nop

x5: comb,<>	%r25,0,l0	!	sh2add	%r26,%r26,%r1	! bv	0(r31)	!	add	%r29,%r1,%r29

x6:	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh1add	%r1,%r29,%r29	! bv,n  0(r31)

x7:	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh2add	%r26,%r29,%r29	! b,n	ret_t0

x8: comb,<>	%r25,0,l1	! sh3add	%r26,%r29,%r29	! bv,n  0(r31)	! nop

x9: comb,<>	%r25,0,l0	!	sh3add	%r26,%r26,%r1	! bv	0(r31)	!	add	%r29,%r1,%r29

x10:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh1add	%r1,%r29,%r29	! bv,n  0(r31)

x11:	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r26,%r29,%r29	! b,n	ret_t0

x12:	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh2add	%r1,%r29,%r29	! bv,n  0(r31)

x13:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r26,%r29,%r29	! b,n	ret_t0

x14:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x15:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	!	sh1add	%r1,%r1,%r1	! b,n	ret_t0

x16: zdep	%r26,27,28,%r1	! comb,<>	%r25,0,l1	!	add	%r29,%r1,%r29	! bv,n  0(r31)

x17:	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r26,%r1,%r1	! b,n	ret_t0

x18:	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh1add	%r1,%r29,%r29	! bv,n  0(r31)

x19:	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh1add	%r1,%r26,%r1	! b,n	ret_t0

x20:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh2add	%r1,%r29,%r29	! bv,n  0(r31)

x21:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh2add	%r1,%r26,%r1	! b,n	ret_t0

x22:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x23:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x24:	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh3add	%r1,%r29,%r29	! bv,n  0(r31)

x25:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	!	sh2add	%r1,%r1,%r1	! b,n	ret_t0

x26:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x27:	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	!	sh3add	%r1,%r1,%r1	! b,n	ret_t0

x28:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x29:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x30:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x31: zdep	%r26,26,27,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0

x32: zdep	%r26,26,27,%r1	! comb,<>	%r25,0,l1	!	add	%r29,%r1,%r29	! bv,n  0(r31)

x33:	sh3add	%r26,0,%r1		! comb,<>	%r25,0,l0	! sh2add	%r1,%r26,%r1	! b,n	ret_t0

x34: zdep	%r26,27,28,%r1	! add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x35:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh3add	%r26,%r1,%r1

x36:	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh2add	%r1,%r29,%r29	! bv,n  0(r31)

x37:	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh2add	%r1,%r26,%r1	! b,n	ret_t0

x38:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x39:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x40:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh3add	%r1,%r29,%r29	! bv,n  0(r31)

x41:	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r1,%r26,%r1	! b,n	ret_t0

x42:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x43:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x44:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x45:	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	!	sh2add	%r1,%r1,%r1	! b,n	ret_t0

x46:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! add	%r1,%r26,%r1

x47:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sh1add	%r26,%r1,%r1

x48:	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! zdep	%r1,27,28,%r1	! b,n	ret_t0

x49:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sh2add	%r26,%r1,%r1

x50:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x51:	sh3add	%r26,%r26,%r1		! sh3add	%r26,%r1,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x52:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x53:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x54:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x55:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x56:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x57:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x58:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x59:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_t02a0	!	sh1add	%r1,%r1,%r1

x60:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x61:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x62: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x63: zdep	%r26,25,26,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0

x64: zdep	%r26,25,26,%r1	! comb,<>	%r25,0,l1	!	add	%r29,%r1,%r29	! bv,n  0(r31)

x65:	sh3add	%r26,0,%r1		! comb,<>	%r25,0,l0	! sh3add	%r1,%r26,%r1	! b,n	ret_t0

x66: zdep	%r26,26,27,%r1	! add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x67:	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x68:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x69:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x70: zdep	%r26,25,26,%r1	! sh2add	%r26,%r1,%r1	!	b	e_t0	! sh1add	%r26,%r1,%r1

x71:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,0,%r1	!	b	e_t0	! sub	%r1,%r26,%r1

x72:	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh3add	%r1,%r29,%r29	! bv,n  0(r31)

x73:	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	!	add	%r29,%r1,%r29

x74:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x75:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x76:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x77:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x78:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_2t0	! sh1add	%r1,%r26,%r1

x79: zdep	%r26,27,28,%r1	!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sub	%r1,%r26,%r1

x80: zdep	%r26,27,28,%r1	!	sh2add	%r1,%r1,%r1	! b	e_shift	!	add	%r29,%r1,%r29

x81:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	! b	e_shift	!	add	%r29,%r1,%r29

x82:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x83:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x84:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x85:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x86:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_2t0	! sh1add	%r1,%r26,%r1

x87:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	! b	e_t02a0	! sh2add	%r26,%r1,%r1

x88:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x89:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x90:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x91:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x92:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_4t0	! sh1add	%r1,%r26,%r1

x93: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x94:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_2t0	! sh1add	%r26,%r1,%r1

x95:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x96:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x97:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x98: zdep	%r26,26,27,%r1	!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh1add	%r26,%r1,%r1

x99:	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x100:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x101:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x102: zdep	%r26,26,27,%r1	! sh1add	%r26,%r1,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x103:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_t02a0	! sh2add	%r1,%r26,%r1

x104:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x105:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x106:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x107:	sh3add	%r26,%r26,%r1		! sh2add	%r26,%r1,%r1	! b	e_t02a0	! sh3add	%r1,%r26,%r1

x108:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x109:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x110:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_2t0	! sh1add	%r1,%r26,%r1

x111:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x112:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! zdep	%r1,27,28,%r1

x113:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t02a0	!	sh1add	%r1,%r1,%r1

x114:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_2t0	!	sh1add	%r1,%r1,%r1

x115:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_2t0a0	!	sh1add	%r1,%r1,%r1

x116:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_4t0	! sh2add	%r1,%r26,%r1

x117:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	!	sh3add	%r1,%r1,%r1

x118:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t0a0	!	sh3add	%r1,%r1,%r1

x119:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t02a0	!	sh3add	%r1,%r1,%r1

x120:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x121:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x122:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x123:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x124: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x125:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x126: zdep	%r26,25,26,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x127: zdep	%r26,24,25,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0

x128: zdep	%r26,24,25,%r1	! comb,<>	%r25,0,l1	!	add	%r29,%r1,%r29	! bv,n  0(r31)

x129: zdep	%r26,24,25,%r1	! comb,<>	%r25,0,l0	! add	%r1,%r26,%r1	! b,n	ret_t0

x130: zdep	%r26,25,26,%r1	! add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x131:	sh3add	%r26,0,%r1		! sh3add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x132:	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x133:	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x134:	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	!	b	e_2t0	! sh1add	%r1,%r26,%r1

x135:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x136:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x137:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x138:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x139:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_2t0a0	! sh2add	%r1,%r26,%r1

x140:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_4t0	!	sh2add	%r1,%r1,%r1

x141:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_4t0a0	! sh1add	%r1,%r26,%r1

x142:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,0,%r1	!	b	e_2t0	! sub	%r1,%r26,%r1

x143: zdep	%r26,27,28,%r1	!	sh3add	%r1,%r1,%r1	!	b	e_t0	! sub	%r1,%r26,%r1

x144:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,0,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x145:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,0,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x146:	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x147:	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x148:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x149:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x150:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_2t0	! sh1add	%r1,%r26,%r1

x151:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	! sh1add	%r1,%r26,%r1

x152:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x153:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x154:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x155: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x156:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_4t0	! sh1add	%r1,%r26,%r1

x157: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_t02a0	!	sh2add	%r1,%r1,%r1

x158: zdep	%r26,27,28,%r1	!	sh2add	%r1,%r1,%r1	!	b	e_2t0	! sub	%r1,%r26,%r1

x159: zdep	%r26,26,27,%r1	!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sub	%r1,%r26,%r1

x160:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,0,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x161:	sh3add	%r26,0,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x162:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x163:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	!	b	e_t0	! sh1add	%r1,%r26,%r1

x164:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x165:	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x166:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	!	b	e_2t0	! sh1add	%r1,%r26,%r1

x167:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_2t0a0	! sh1add	%r1,%r26,%r1

x168:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x169:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x170: zdep	%r26,26,27,%r1	! sh1add	%r26,%r1,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x171:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_t0	!	sh3add	%r1,%r1,%r1

x172:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_4t0	! sh1add	%r1,%r26,%r1

x173:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_t02a0	!	sh3add	%r1,%r1,%r1

x174: zdep	%r26,26,27,%r1	! sh1add	%r26,%r1,%r1	! b	e_t04a0	!	sh2add	%r1,%r1,%r1

x175:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	!	b	e_5t0	! sh1add	%r1,%r26,%r1

x176:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_8t0	! add	%r1,%r26,%r1

x177:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_8t0a0	! add	%r1,%r26,%r1

x178:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_2t0	! sh3add	%r1,%r26,%r1

x179:	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_2t0a0	! sh3add	%r1,%r26,%r1

x180:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x181:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x182:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_2t0	! sh1add	%r1,%r26,%r1

x183:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_2t0a0	! sh1add	%r1,%r26,%r1

x184:	sh2add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	!	b	e_4t0	! add	%r1,%r26,%r1

x185:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x186: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	!	b	e_2t0	!	sh1add	%r1,%r1,%r1

x187:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t02a0	!	sh2add	%r1,%r1,%r1

x188:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_4t0	! sh1add	%r26,%r1,%r1

x189:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_t0	!	sh3add	%r1,%r1,%r1

x190:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_2t0	!	sh2add	%r1,%r1,%r1

x191: zdep	%r26,25,26,%r1	!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sub	%r1,%r26,%r1

x192:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x193:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x194:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x195:	sh3add	%r26,0,%r1		! sh3add	%r1,%r26,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x196:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_4t0	! sh1add	%r1,%r26,%r1

x197:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	! b	e_4t0a0	! sh1add	%r1,%r26,%r1

x198: zdep	%r26,25,26,%r1	! sh1add	%r26,%r1,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x199:	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	!	sh1add	%r1,%r1,%r1

x200:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x201:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x202:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x203:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_2t0a0	! sh2add	%r1,%r26,%r1

x204:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	!	b	e_4t0	!	sh1add	%r1,%r1,%r1

x205:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x206: zdep	%r26,25,26,%r1	! sh2add	%r26,%r1,%r1	! b	e_t02a0	!	sh1add	%r1,%r1,%r1

x207:	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	!	b	e_3t0	! sh2add	%r1,%r26,%r1

x208:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_8t0	! add	%r1,%r26,%r1

x209:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_8t0a0	! add	%r1,%r26,%r1

x210:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_2t0	!	sh2add	%r1,%r1,%r1

x211:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	!	sh2add	%r1,%r1,%r1

x212:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_4t0	! sh2add	%r1,%r26,%r1

x213:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_4t0a0	! sh2add	%r1,%r26,%r1

x214:	sh3add	%r26,%r26,%r1		! sh2add	%r26,%r1,%r1	! b	e2t04a0	! sh3add	%r1,%r26,%r1

x215:	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_5t0	! sh1add	%r1,%r26,%r1

x216:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x217:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x218:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_2t0	! sh2add	%r1,%r26,%r1

x219:	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x220:	sh1add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	!	b	e_4t0	! sh1add	%r1,%r26,%r1

x221:	sh1add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	! b	e_4t0a0	! sh1add	%r1,%r26,%r1

x222:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_2t0	!	sh1add	%r1,%r1,%r1

x223:	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	!	sh1add	%r1,%r1,%r1

x224:	sh3add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_8t0	! add	%r1,%r26,%r1

x225:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_t0	!	sh2add	%r1,%r1,%r1

x226:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_t02a0	! zdep	%r1,26,27,%r1

x227:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_t02a0	!	sh2add	%r1,%r1,%r1

x228:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_4t0	!	sh1add	%r1,%r1,%r1

x229:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_4t0a0	!	sh1add	%r1,%r1,%r1

x230:	sh3add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_5t0	! add	%r1,%r26,%r1

x231:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_3t0	! sh2add	%r1,%r26,%r1

x232:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	!	b	e_8t0	! sh2add	%r1,%r26,%r1

x233:	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_8t0a0	! sh2add	%r1,%r26,%r1

x234:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	!	b	e_2t0	!	sh3add	%r1,%r1,%r1

x235:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	!	sh3add	%r1,%r1,%r1

x236:	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e4t08a0	!	sh1add	%r1,%r1,%r1

x237: zdep	%r26,27,28,%r1	!	sh2add	%r1,%r1,%r1	!	b	e_3t0	! sub	%r1,%r26,%r1

x238:	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e2t04a0	!	sh3add	%r1,%r1,%r1

x239: zdep	%r26,27,28,%r1	!	sh2add	%r1,%r1,%r1	! b	e_t0ma0	!	sh1add	%r1,%r1,%r1

x240:	sh3add	%r26,%r26,%r1		! add	%r1,%r26,%r1	!	b	e_8t0	!	sh1add	%r1,%r1,%r1

x241:	sh3add	%r26,%r26,%r1		! add	%r1,%r26,%r1	! b	e_8t0a0	!	sh1add	%r1,%r1,%r1

x242:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_2t0	! sh3add	%r1,%r26,%r1

x243:	sh3add	%r26,%r26,%r1		!	sh3add	%r1,%r1,%r1	!	b	e_t0	!	sh1add	%r1,%r1,%r1

x244:	sh2add	%r26,%r26,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_4t0	! sh2add	%r1,%r26,%r1

x245:	sh3add	%r26,0,%r1		!	sh1add	%r1,%r1,%r1	!	b	e_5t0	! sh1add	%r1,%r26,%r1

x246:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	!	b	e_2t0	!	sh1add	%r1,%r1,%r1

x247:	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_2t0a0	!	sh1add	%r1,%r1,%r1

x248: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29

x249: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	!	b	e_t0	! sh3add	%r1,%r26,%r1

x250:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	!	b	e_2t0	!	sh2add	%r1,%r1,%r1

x251:	sh2add	%r26,%r26,%r1		!	sh2add	%r1,%r1,%r1	! b	e_2t0a0	!	sh2add	%r1,%r1,%r1

x252: zdep	%r26,25,26,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29

x253: zdep	%r26,25,26,%r1	! sub	%r1,%r26,%r1	!	b	e_t0	! sh2add	%r1,%r26,%r1

x254: zdep	%r26,24,25,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29

x255: zdep	%r26,23,24,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0

;1040 insts before this.
ret_t0: bv	0(r31)

e_t0:	add	%r29,%r1,%r29

e_shift: comb,<>	%r25,0,l2

	zdep	%r26,23,24,%r26	; %r26 <<= 8 ***********
	bv,n  0(r31)
e_t0ma0: comb,<>	%r25,0,l0

	sub	%r1,%r26,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e_t0a0: comb,<>	%r25,0,l0

	add	%r1,%r26,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e_t02a0: comb,<>	%r25,0,l0

	sh1add	%r26,%r1,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e_t04a0: comb,<>	%r25,0,l0

	sh2add	%r26,%r1,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e_2t0: comb,<>	%r25,0,l1

	sh1add	%r1,%r29,%r29
	bv,n  0(r31)
e_2t0a0: comb,<>	%r25,0,l0

	sh1add	%r1,%r26,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e2t04a0: sh1add	%r26,%r1,%r1

	comb,<>	%r25,0,l1
	sh1add	%r1,%r29,%r29
	bv,n  0(r31)
e_3t0: comb,<>	%r25,0,l0

	sh1add	%r1,%r1,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e_4t0: comb,<>	%r25,0,l1

	sh2add	%r1,%r29,%r29
	bv,n  0(r31)
e_4t0a0: comb,<>	%r25,0,l0

	sh2add	%r1,%r26,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e4t08a0: sh1add	%r26,%r1,%r1

	comb,<>	%r25,0,l1
	sh2add	%r1,%r29,%r29
	bv,n  0(r31)
e_5t0: comb,<>	%r25,0,l0

	sh2add	%r1,%r1,%r1
	bv	0(r31)
	add	%r29,%r1,%r29
e_8t0: comb,<>	%r25,0,l1

	sh3add	%r1,%r29,%r29
	bv,n  0(r31)
e_8t0a0: comb,<>	%r25,0,l0

	sh3add	%r1,%r26,%r1
	bv	0(r31)
	add	%r29,%r1,%r29

	.procend
	.end

	.import $$divI_2,millicode
	.import $$divI_3,millicode
	.import $$divI_4,millicode
	.import $$divI_5,millicode
	.import $$divI_6,millicode
	.import $$divI_7,millicode
	.import $$divI_8,millicode
	.import $$divI_9,millicode
	.import $$divI_10,millicode
	.import $$divI_12,millicode
	.import $$divI_14,millicode
	.import $$divI_15,millicode
	.export $$divI,millicode
	.export	$$divoI,millicode
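; $$divoI is the overflow-detecting entry point: it peels off
; divisor == -1 so that negative1 can use addo, which traps exactly when
; the dividend is 0x80000000 (the one signed quotient that overflows).
; Callers entering at $$divI skip that check.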
$$divoI:
	.proc
	.callinfo	NO_CALLS
	comib,=,n  -1,arg1,negative1	; when divisor == -1
$$divI:
	comib,>>=,n 15,arg1,small_divisor
	add,>=	0,arg0,retreg		; move dividend, if retreg < 0,
normal1:
	  sub	0,retreg,retreg		;   make it positive
	sub	0,arg1,temp		; clear carry,
					;   negate the divisor
	ds	0,temp,0		; set V-bit to the comple-
					;   ment of the divisor sign
	add	retreg,retreg,retreg	; shift msb bit into carry
	ds	r0,arg1,temp		; 1st divide step, if no carry
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 2nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 3rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 4th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 5th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 6th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 7th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 8th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 9th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 10th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 11th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 12th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 13th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 14th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 15th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 16th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 17th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 18th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 19th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 20th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 21st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 22nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 23rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 24th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 25th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 26th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 27th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 28th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 29th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 30th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 31st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 32nd divide step,
	addc	retreg,retreg,retreg	; shift last retreg bit into retreg
	xor,>=	arg0,arg1,0		; get correct sign of quotient
	  sub	0,retreg,retreg		;   based on operand signs
	bv,n  0(r31)
	nop
;______________________________________________________________________
small_divisor:
	blr,n	arg1,r0
	nop
; table for divisor == 0,1, ... ,15
	addit,=	0,arg1,r0	; trap if divisor == 0
	nop
	bv	0(r31)		; divisor == 1
	copy	arg0,retreg
	 b,n	$$divI_2	; divisor == 2
	nop
	 b,n	$$divI_3	; divisor == 3
	nop
	 b,n	$$divI_4	; divisor == 4
	nop
	 b,n	$$divI_5	; divisor == 5
	nop
	 b,n	$$divI_6	; divisor == 6
	nop
	 b,n	$$divI_7	; divisor == 7
	nop
	 b,n	$$divI_8	; divisor == 8
	nop
	 b,n	$$divI_9	; divisor == 9
	nop
	 b,n	$$divI_10	; divisor == 10
	nop
	b	normal1		; divisor == 11
	add,>=	0,arg0,retreg
	 b,n	$$divI_12	; divisor == 12
	nop
	b	normal1		; divisor == 13
	add,>=	0,arg0,retreg
	 b,n	$$divI_14	; divisor == 14
	nop
	 b,n	$$divI_15	; divisor == 15
	nop
;______________________________________________________________________
negative1:
	sub	0,arg0,retreg	; result is negation of dividend
	bv	0(r31)
	addo	arg0,arg1,r0	; trap iff dividend==0x80000000 && divisor==-1
	.procend
	.end